From 4f89775604d326d9ba3372f3e3bd11b59a5ec6ab Mon Sep 17 00:00:00 2001 From: brentstone Date: Tue, 26 Sep 2023 16:34:24 -0600 Subject: [PATCH 001/161] refactor `Epoched` for past epochs --- proof_of_stake/src/epoched.rs | 180 +++++++++++++++++++++++++--------- proof_of_stake/src/types.rs | 36 +++++-- 2 files changed, 157 insertions(+), 59 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index d5a567fc94..3a69c83c39 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -16,6 +16,7 @@ use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::storage::{self, Epoch}; use crate::parameters::PosParams; +use crate::read_pos_params; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; @@ -28,57 +29,48 @@ pub const OLDEST_EPOCH_SUB_KEY: &str = "oldest_epoch"; const DEFAULT_NUM_PAST_EPOCHS: u64 = 2; /// Discrete epoched data handle -pub struct Epoched< - Data, - FutureEpochs, - const NUM_PAST_EPOCHS: u64 = DEFAULT_NUM_PAST_EPOCHS, - SON = collections::Simple, -> { +pub struct Epoched { storage_prefix: storage::Key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, phantom_son: PhantomData, } /// Discrete epoched data handle with nested lazy structure -pub type NestedEpoched< - Data, - FutureEpochs, - const NUM_PAST_EPOCHS: u64 = DEFAULT_NUM_PAST_EPOCHS, -> = Epoched; +pub type NestedEpoched = + Epoched; /// Delta epoched data handle -pub struct EpochedDelta { +pub struct EpochedDelta { storage_prefix: storage::Key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, } -impl - Epoched +impl + Epoched where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, { /// Open the handle pub fn open(key: storage::Key) -> Self { Self { storage_prefix: key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, phantom_son: PhantomData, } } - - /// Return the number of past epochs to 
keep data for - pub fn get_num_past_epochs() -> u64 { - NUM_PAST_EPOCHS - } } -impl - Epoched +impl Epoched where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, Data: BorshSerialize + BorshDeserialize + 'static + Debug, { /// Initialize new epoched data. Sets the head to the given value. @@ -125,7 +117,8 @@ where Some(_) => return Ok(res), None => { if epoch.0 > 0 - && epoch > Self::sub_past_epochs(last_update) + && epoch + > Self::sub_past_epochs(params, last_update) { epoch = Epoch(epoch.0 - 1); } else { @@ -149,7 +142,8 @@ where where S: StorageWrite + StorageRead, { - self.update_data(storage, current_epoch)?; + let params = read_pos_params(storage)?; + self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -177,6 +171,7 @@ where fn update_data( &self, storage: &mut S, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -189,7 +184,7 @@ where { let oldest_to_keep = current_epoch .0 - .checked_sub(NUM_PAST_EPOCHS) + .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); if oldest_epoch.0 < oldest_to_keep { let diff = oldest_to_keep - oldest_epoch.0; @@ -211,7 +206,8 @@ where } } if let Some(latest_value) = latest_value { - let new_oldest_epoch = Self::sub_past_epochs(current_epoch); + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); // TODO we can add `contains_key` to LazyMap if data_handler.get(storage, &new_oldest_epoch)?.is_none() { tracing::debug!( @@ -269,8 +265,13 @@ where LazyMap::open(key) } - fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + Epoch( + epoch + .0 + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default(), + ) } fn get_oldest_epoch_storage_key(&self) -> storage::Key { @@ -303,10 +304,11 @@ where } } -impl - Epoched +impl + Epoched where FutureEpochs: EpochOffset, + PastEpochs: 
EpochOffset, Data: LazyCollection + Debug, { /// Get the inner LazyCollection value by the outer key @@ -393,10 +395,11 @@ where // } // } -impl - EpochedDelta +impl + EpochedDelta where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, Data: BorshSerialize + BorshDeserialize + ops::Add @@ -409,6 +412,7 @@ where Self { storage_prefix: key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, } } @@ -457,7 +461,7 @@ where None => Ok(None), Some(last_update) => { let data_handler = self.get_data_handler(); - let start_epoch = Self::sub_past_epochs(last_update); + let start_epoch = Self::sub_past_epochs(params, last_update); let future_most_epoch = last_update + FutureEpochs::value(params); @@ -493,7 +497,8 @@ where where S: StorageWrite + StorageRead, { - self.update_data(storage, current_epoch)?; + let params = read_pos_params(storage)?; + self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -519,6 +524,7 @@ where fn update_data( &self, storage: &mut S, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -531,7 +537,7 @@ where { let oldest_to_keep = current_epoch .0 - .checked_sub(NUM_PAST_EPOCHS) + .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); if oldest_epoch.0 < oldest_to_keep { let diff = oldest_to_keep - oldest_epoch.0; @@ -557,7 +563,8 @@ where } } if let Some(sum) = sum { - let new_oldest_epoch = Self::sub_past_epochs(current_epoch); + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); let new_oldest_epoch_data = match data_handler.get(storage, &new_oldest_epoch)? 
{ Some(oldest_epoch_data) => oldest_epoch_data + sum, @@ -631,8 +638,13 @@ where handle.iter(storage)?.collect() } - fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + Epoch( + epoch + .0 + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default(), + ) } fn get_oldest_epoch_storage_key(&self) -> storage::Key { @@ -688,6 +700,29 @@ impl EpochOffset for OffsetZero { } } +/// Default offset +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetDefaultNumPastEpochs; +impl EpochOffset for OffsetDefaultNumPastEpochs { + fn value(_params: &PosParams) -> u64 { + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::DefaultNumPastEpoch + } +} + /// Offset at pipeline length. #[derive( Debug, @@ -757,11 +792,59 @@ impl EpochOffset for OffsetPipelinePlusUnbondingLen { } } +/// Offset at the slash processing delay. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetSlashProcessingLen; +impl EpochOffset for OffsetSlashProcessingLen { + fn value(params: &PosParams) -> u64 { + params.slash_processing_epoch_offset() + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::SlashProcessingLen + } +} + +/// Maximum offset. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxU64; +impl EpochOffset for OffsetMaxU64 { + fn value(_params: &PosParams) -> u64 { + u64::MAX + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxU64 + } +} + /// Offset length dynamic choice. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum DynEpochOffset { /// Zero offset Zero, + /// Offset at the const default num past epochs (above) + DefaultNumPastEpoch, /// Offset at pipeline length - 1 PipelineLenMinusOne, /// Offset at pipeline length. @@ -770,6 +853,11 @@ pub enum DynEpochOffset { UnbondingLen, /// Offset at pipeline + unbonding length. PipelinePlusUnbondingLen, + /// Offset at slash processing delay (unbonding + + /// cubic_slashing_window + 1). + SlashProcessingLen, + /// Offset of the max u64 value + MaxU64, } /// Which offset should be used to set data. The value is read from @@ -794,11 +882,9 @@ mod test { fn test_epoched_data_trimming() -> storage_api::Result<()> { let mut s = TestWlStorage::default(); - const NUM_PAST_EPOCHS: u64 = 2; let key_prefix = storage::Key::parse("test").unwrap(); - let epoched = Epoched::::open( - key_prefix, - ); + let epoched = + Epoched::::open(key_prefix); let data_handler = epoched.get_data_handler(); assert!(epoched.get_last_update(&s)?.is_none()); assert!(epoched.get_oldest_epoch(&s)?.is_none()); @@ -865,11 +951,9 @@ mod test { fn test_epoched_without_data_trimming() -> storage_api::Result<()> { let mut s = TestWlStorage::default(); - const NUM_PAST_EPOCHS: u64 = u64::MAX; let key_prefix = storage::Key::parse("test").unwrap(); - let epoched = Epoched::::open( - key_prefix, - ); + let epoched = + Epoched::::open(key_prefix); let data_handler = epoched.get_data_handler(); assert!(epoched.get_last_update(&s)?.is_none()); assert!(epoched.get_oldest_epoch(&s)?.is_none()); @@ -935,10 +1019,9 @@ mod test { fn test_epoched_delta_data_trimming() -> storage_api::Result<()> { let mut s = TestWlStorage::default(); - const NUM_PAST_EPOCHS: u64 = 2; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - EpochedDelta::::open( + EpochedDelta::::open( key_prefix, ); let data_handler = epoched.get_data_handler(); @@ -1010,10 +1093,9 @@ mod test { let mut s = TestWlStorage::default(); // 
Nothing should ever get trimmed - const NUM_PAST_EPOCHS: u64 = u64::MAX; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - EpochedDelta::::open( + EpochedDelta::::open( key_prefix, ); let data_handler = epoched.get_data_handler(); diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 736ffe7a46..4c2eb8bbed 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -38,6 +38,7 @@ const VALIDATOR_DELTAS_SLASHES_LEN: u64 = 23; pub type ValidatorSetPositions = crate::epoched::NestedEpoched< LazyMap, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; impl ValidatorSetPositions { @@ -84,23 +85,29 @@ impl ValidatorSetPositions { pub type ValidatorConsensusKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Epoched validator's eth hot key. pub type ValidatorEthHotKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Epoched validator's eth cold key. pub type ValidatorEthColdKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Epoched validator's state. -pub type ValidatorStates = - crate::epoched::Epoched; +pub type ValidatorStates = crate::epoched::Epoched< + ValidatorState, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, +>; /// A map from a position to an address in a Validator Set pub type ValidatorPositionAddresses = LazyMap; @@ -117,41 +124,49 @@ pub type BelowCapacityValidatorSet = pub type ConsensusValidatorSets = crate::epoched::NestedEpoched< ConsensusValidatorSet, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Epoched below-capacity validator sets. 
pub type BelowCapacityValidatorSets = crate::epoched::NestedEpoched< BelowCapacityValidatorSet, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Epoched total consensus validator stake -pub type TotalConsensusStakes = - crate::epoched::Epoched; +pub type TotalConsensusStakes = crate::epoched::Epoched< + Amount, + crate::epoched::OffsetZero, + crate::epoched::OffsetMaxU64, +>; /// Epoched validator's deltas. pub type ValidatorDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetSlashProcessingLen, >; /// Epoched total deltas. pub type TotalDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetSlashProcessingLen, >; /// Epoched validator commission rate -pub type CommissionRates = - crate::epoched::Epoched; +pub type CommissionRates = crate::epoched::Epoched< + Dec, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, +>; /// Epoched validator's bonds pub type Bonds = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetPipelineLen, - U64_MAX, + crate::epoched::OffsetMaxU64, >; /// An epoched lazy set of all known active validator addresses (consensus, @@ -159,6 +174,7 @@ pub type Bonds = crate::epoched::EpochedDelta< pub type ValidatorAddresses = crate::epoched::NestedEpoched< LazySet
, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Slashes indexed by validator address and then block height (for easier @@ -172,7 +188,7 @@ pub type ValidatorSlashes = NestedMap; pub type EpochedSlashes = crate::epoched::NestedEpoched< ValidatorSlashes, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetSlashProcessingLen, >; /// Epoched validator's unbonds From d8a29effe87bef203ffc4146a8c062bb423bcc34 Mon Sep 17 00:00:00 2001 From: brentstone Date: Tue, 26 Sep 2023 16:38:15 -0600 Subject: [PATCH 002/161] cleaning: remove unused code + improve docstrings --- proof_of_stake/src/epoched.rs | 116 +--------------------------------- proof_of_stake/src/types.rs | 47 -------------- 2 files changed, 3 insertions(+), 160 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 3a69c83c39..8483a61c62 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -344,7 +344,7 @@ where .unwrap() } - /// TODO + /// Get the epoch of the most recent update pub fn get_last_update( &self, storage: &S, @@ -356,7 +356,7 @@ where storage.read(&key) } - /// TODO + /// Set the epoch of the most recent update pub fn set_last_update( &self, storage: &mut S, @@ -368,33 +368,8 @@ where let key = self.get_last_update_storage_key(); storage.write(&key, current_epoch) } - - /// TODO - pub fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) - } - - // pub fn get_inner_by_epoch(&self) -> storage_api::Result {} - - // TODO: we may need an update_data() method, figure out when it should be - // called (in at()?) 
} -// impl -// Epoched< -// LazyMap, -// FutureEpochs, -// NUM_PAST_EPOCHS, -// collections::Nested, -// > -// where -// FutureEpochs: EpochOffset, -// { -// pub fn get_inner_by_epoch(&self, epoch: &Epoch) -> LazyMap { -// self.at() -// } -// } - impl EpochedDelta where @@ -691,7 +666,7 @@ where )] pub struct OffsetZero; impl EpochOffset for OffsetZero { - fn value(_paras: &PosParams) -> u64 { + fn value(_params: &PosParams) -> u64 { 0 } @@ -1158,89 +1133,4 @@ mod test { Ok(()) } - - // use namada_core::ledger::storage::testing::TestStorage; - // use namada_core::types::address::{self, Address}; - // use namada_core::types::storage::Key; - // - // use super::{ - // storage, storage_api, Epoch, LazyMap, NestedEpoched, NestedMap, - // OffsetPipelineLen, - // }; - // - // #[test] - // fn testing_epoched_new() -> storage_api::Result<()> { - // let mut storage = TestStorage::default(); - // - // let key1 = storage::Key::parse("test_nested1").unwrap(); - // let nested1 = - // NestedEpoched::, OffsetPipelineLen>::open( - // key1, - // ); - // nested1.init(&mut storage, Epoch(0))?; - // - // let key2 = storage::Key::parse("test_nested2").unwrap(); - // let nested2 = NestedEpoched::< - // NestedMap>, - // OffsetPipelineLen, - // >::open(key2); - // nested2.init(&mut storage, Epoch(0))?; - // - // dbg!(&nested1.get_last_update_storage_key()); - // dbg!(&nested1.get_last_update(&storage)); - // - // nested1.at(&Epoch(0)).insert( - // &mut storage, - // address::testing::established_address_1(), - // 1432, - // )?; - // dbg!(&nested1.at(&Epoch(0)).iter(&mut storage)?.next()); - // dbg!(&nested1.at(&Epoch(1)).iter(&mut storage)?.next()); - // - // nested2.at(&Epoch(0)).at(&100).insert( - // &mut storage, - // 1, - // address::testing::established_address_2(), - // )?; - // dbg!(&nested2.at(&Epoch(0)).iter(&mut storage)?.next()); - // dbg!(&nested2.at(&Epoch(1)).iter(&mut storage)?.next()); - // - // dbg!(&nested_epoched.get_epoch_key(&Epoch::from(0))); - // - // let epoch = 
Epoch::from(0); - // let addr = address::testing::established_address_1(); - // let amount: u64 = 234235; - // - // nested_epoched - // .at(&epoch) - // .insert(&mut storage, addr.clone(), amount)?; - // - // let epoch = epoch + 3_u64; - // nested_epoched.at(&epoch).insert( - // &mut storage, - // addr.clone(), - // 999_u64, - // )?; - // - // dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(0))?); - // dbg!( - // nested_epoched - // .get_data_handler() - // .get_data_key(&Epoch::from(3)) - // ); - // dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(3))?); - // dbg!( - // nested_epoched - // .at(&Epoch::from(0)) - // .get(&storage, &addr.clone())? - // ); - // dbg!( - // nested_epoched - // .at(&Epoch::from(3)) - // .get(&storage, &addr.clone())? - // ); - // dbg!(nested_epoched.at(&Epoch::from(3)).get_data_key(&addr)); - // - // Ok(()) - // } } diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 4c2eb8bbed..2ec8e4fd86 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -14,7 +14,6 @@ use namada_core::ledger::storage_api::collections::lazy_map::NestedMap; use namada_core::ledger::storage_api::collections::{ LazyMap, LazySet, LazyVec, }; -use namada_core::ledger::storage_api::{self, StorageRead}; use namada_core::types::address::Address; use namada_core::types::dec::Dec; use namada_core::types::key::common; @@ -25,14 +24,6 @@ pub use rev_order::ReverseOrdTokenAmount; use crate::parameters::PosParams; -// TODO: replace `POS_MAX_DECIMAL_PLACES` with -// core::types::token::NATIVE_MAX_DECIMAL_PLACES?? 
-const U64_MAX: u64 = u64::MAX; - -/// Number of epochs below the current epoch for which validator deltas and -/// slashes are stored -const VALIDATOR_DELTAS_SLASHES_LEN: u64 = 23; - // TODO: add this to the spec /// Stored positions of validators in validator sets pub type ValidatorSetPositions = crate::epoched::NestedEpoched< @@ -41,44 +32,6 @@ pub type ValidatorSetPositions = crate::epoched::NestedEpoched< crate::epoched::OffsetDefaultNumPastEpochs, >; -impl ValidatorSetPositions { - /// TODO - pub fn get_position( - &self, - storage: &S, - epoch: &Epoch, - address: &Address, - params: &PosParams, - ) -> storage_api::Result> - where - S: StorageRead, - { - let last_update = self.get_last_update(storage)?; - // dbg!(&last_update); - if last_update.is_none() { - return Ok(None); - } - let last_update = last_update.unwrap(); - let future_most_epoch: Epoch = last_update + params.pipeline_len; - // dbg!(future_most_epoch); - let mut epoch = std::cmp::min(*epoch, future_most_epoch); - loop { - // dbg!(epoch); - match self.at(&epoch).get(storage, address)? { - Some(val) => return Ok(Some(val)), - None => { - if epoch.0 > 0 && epoch > Self::sub_past_epochs(last_update) - { - epoch = Epoch(epoch.0 - 1); - } else { - return Ok(None); - } - } - } - } - } -} - // TODO: check the offsets for each epoched type!! /// Epoched validator's consensus key. From cd64efff5262bb7fa8fe1addefb080e6c5fa4609 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 11 Sep 2023 15:04:36 +0200 Subject: [PATCH 003/161] Test vector formatting changes. --- scripts/generator.sh | 318 +++++++++++---------------- shared/src/sdk/signing.rs | 452 ++++++++++++++++++++++++++------------ shared/src/sdk/tx.rs | 11 +- 3 files changed, 437 insertions(+), 344 deletions(-) diff --git a/scripts/generator.sh b/scripts/generator.sh index 3fe1792a49..c9635d498d 100755 --- a/scripts/generator.sh +++ b/scripts/generator.sh @@ -9,8 +9,10 @@ # vectors. 
NAMADA_DIR="$(pwd)" +NAMADA_BASE_DIR_FILE="$(pwd)/namada_base_dir" export NAMADA_LEDGER_LOG_PATH="$(pwd)/vectors.json" export NAMADA_TX_LOG_PATH="$(pwd)/debugs.txt" +export NAMADA_DEV=false if [ "$#" -ne 1 ]; then echo "Illegal number of parameters" @@ -19,11 +21,14 @@ elif [ "$1" = "server" ]; then sed -i 's/^epochs_per_year = 31_536_000$/epochs_per_year = 262_800/' genesis/test-vectors-single-node.toml - NAMADA_GENESIS_FILE=$(cargo run --bin namadac -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') + NAMADA_GENESIS_FILE=$(cargo run --bin namadac --package namada_apps --manifest-path Cargo.toml -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --dont-archive --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') rm genesis/test-vectors-single-node.toml NAMADA_BASE_DIR=${NAMADA_GENESIS_FILE%.toml} + echo $NAMADA_BASE_DIR > $NAMADA_BASE_DIR_FILE + + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/config.toml cp wasm/*.wasm $NAMADA_BASE_DIR/wasm/ @@ -31,8 +36,14 @@ elif [ "$1" = "server" ]; then cp $NAMADA_BASE_DIR/setup/other/wallet.toml $NAMADA_BASE_DIR/wallet.toml - cargo run --bin namadan -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/setup/validator-0/.namada/$(basename $NAMADA_BASE_DIR)/config.toml + + cargo run --bin namadan --package namada_apps --manifest-path Cargo.toml -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger elif [ "$1" = "client" ]; then + if test -f "$NAMADA_BASE_DIR_FILE"; then + NAMADA_BASE_DIR="$(cat $NAMADA_BASE_DIR_FILE)" + fi + echo > 
$NAMADA_TX_LOG_PATH echo $'[' > $NAMADA_LEDGER_LOG_PATH @@ -40,120 +51,49 @@ elif [ "$1" = "client" ]; then ALBERT_ADDRESS=$(cargo run --bin namadaw -- address find --alias albert | sed 's/^Found address Established: //') echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":{ - "Default":"'$NAMADA_DIR'/wasm_for_tests/tx_proposal_code.wasm" - }, - "voting_end_epoch":24, - "voting_start_epoch":12 -} -' > proposal_submission_valid_proposal.json - + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. 
Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 + } + }' > proposal_default.json + echo '{ - "content": { - "abstract": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors": "test@test.com", - "created": "2022-03-10T08:54:37Z", - "details": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to": "www.github.com/anoma/aip/1", - "license": "MIT", - "motivation": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires": "2", - "title": "TheTitle" - }, - "author": "'$ALBERT_ADDRESS'", - "tally_epoch": 18, - "signature": { - "Ed25519": { - "R_bytes": [ - 113, - 196, - 231, - 134, - 101, - 191, - 75, - 17, - 245, - 19, - 50, - 231, - 183, - 80, - 162, - 38, - 108, - 72, - 72, - 2, - 116, - 112, - 121, - 33, - 197, - 67, - 64, - 116, - 21, - 250, - 196, - 121 - ], - "s_bytes": [ - 87, - 163, - 134, - 87, - 42, - 156, - 121, - 211, - 189, - 19, - 255, - 5, - 23, - 178, - 143, - 39, - 118, - 249, - 37, - 53, - 121, - 136, - 59, - 103, - 190, - 91, - 121, - 95, - 46, - 54, - 168, - 9 - ] + "data":['$(od -An -tu1 -v wasm_for_tests/tx_proposal_code.wasm | tr '\n' ' ' | sed 's/\b\s\+\b/,/g')'], + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. 
Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 } - }, - "address": "'$ALBERT_ADDRESS'" -} -' > proposal_offline_proposal + }' > proposal_default_with_data.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ + "author":"'$ALBERT_ADDRESS'", + "content":{ "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", "authors":"test@test.com", "created":"2022-03-10T08:54:37Z", @@ -164,59 +104,41 @@ elif [ "$1" = "client" ]; then "requires":"2", "title":"TheTitle" }, - "grace_epoch":18, - "type":{ - "Default":null - }, - "voting_end_epoch":9, - "voting_start_epoch":3 -}' > proposal_offline_valid_proposal.json + "tally_epoch":1 + }' > proposal_offline.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. 
Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 }, - "grace_epoch":30, - "type":"ETHBridge", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > eth_governance_proposal_valid_proposal.json + "data": {"add":"'$ALBERT_ADDRESS'","remove":[]} + }' > proposal_pgf_steward_add.json - echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. 
Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":"PGFCouncil", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > pgf_governance_proposal_valid_proposal.json + # proposal_default - # proposal_submission + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- unjail-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path proposal_submission_valid_proposal.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + + cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default_with_data.json --node 127.0.0.1:27657 cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --node 127.0.0.1:27657 @@ -226,41 +148,29 @@ elif [ "$1" = "client" ]; then # proposal_offline - cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline_valid_proposal.json --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --data-path proposal_offline_proposal --vote yay --address Albert --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-token NAM --force --node 127.0.0.1:27657 - # eth_governance_proposal + PROPOSAL_OFFLINE_SIGNED=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline.json --signing-keys albert-key --offline --node 127.0.0.1:27657 | grep -o -P '(?<=Proposal serialized to:\s).*') - cargo run 
--bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.07 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path eth_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id 0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address Bertha --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --data-path $PROPOSAL_OFFLINE_SIGNED --vote yay --address Albert --offline --node 127.0.0.1:27657 # pgf_governance_proposal - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 1000" --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address Bertha --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --address Bertha --signing-keys bertha-key 
--ledger-address 127.0.0.1:27657 # non-proposal tests @@ -268,24 +178,38 @@ elif [ "$1" = "client" ]; then cargo run --bin namadac --features std -- bond --validator bertha --amount 25 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-token NAM --force --node 127.0.0.1:27657 cargo run --bin namadac --features std -- reveal-pk --public-key albert-key --gas-payer albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias validator-mult --account-keys albert-key,bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys 
albert-key,bertha-key --threshold 2 --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + # TODO works but panics cargo run --bin namadac --features std -- unbond --validator christel --amount 5 --signing-keys christel-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- withdraw --validator albert --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- init-account --alias albert-account --public-keys albert-key --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- init-account --alias account-mul --public-keys albert-key,bertha-key,christel-key --signing-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + # TODO panics, no vector produced + # cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- ibc-transfer --source bertha --receiver christel --token btc --amount 24 --channel-id channel-141 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --timeout-sec-offset 5 --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadaw -- masp add --alias 
a_spending_key --value xsktest1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu69au6gn3su5ewneas486hdccyayx32hxvt64p3d0hfuprpgcgv2q9gdx3jvxrn02f0nnp3jtdd6f5vwscfuyum083cvfv4jun75ak5sdgrm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcvedhsv --unsafe-dont-encrypt cargo run --bin namadaw -- masp add --alias b_spending_key --value xsktest1qqqqqqqqqqqqqqpagte43rsza46v55dlz8cffahv0fnr6eqacvnrkyuf9lmndgal7c2k4r7f7zu2yr5rjwr374unjjeuzrh6mquzy6grfdcnnu5clzaq2llqhr70a8yyx0p62aajqvrqjxrht3myuyypsvm725uyt5vm0fqzrzuuedtf6fala4r4nnazm9y9hq5yu6pq24arjskmpv4mdgfn3spffxxv8ugvym36kmnj45jcvvmm227vqjm5fq8882yhjsq97p7xrwqqd82s0 --unsafe-dont-encrypt @@ -296,27 +220,31 @@ elif [ "$1" = "client" ]; then cargo run --bin namadaw -- masp add --alias bb_payment_address --value patest1vqe0vyxh6wmhahwa52gthgd6edgqxfmgyv8e94jtwn55mdvpvylcyqnp59595272qrz3zxn0ysg + # TODO vector produced only when epoch boundaries not straddled cargo run --bin namadac --features std -- transfer --source albert --target aa_payment_address --token btc --amount 20 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; + # TODO fragile + until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 7 --force 
--ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - rm proposal_submission_valid_proposal.json - - rm proposal_offline_proposal - - rm proposal_offline_valid_proposal.json + rm -f proposal_default.json + + rm -f proposal_default_with_data.json - rm eth_governance_proposal_valid_proposal.json + rm -f proposal_offline.json - rm pgf_governance_proposal_valid_proposal.json + rm -f proposal_pgf_steward_add.json perl -0777 -i.original -pe 's/,\s*$//igs' $NAMADA_LEDGER_LOG_PATH diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..a4e984077b 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -1,5 +1,6 @@ //! 
Functions to sign transactions use std::collections::{BTreeMap, HashMap}; +use std::fmt::Display; use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; @@ -23,6 +24,10 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use zeroize::Zeroizing; +use crate::core::ledger::governance::storage::proposal::ProposalType; +use crate::core::ledger::governance::storage::vote::{ + StorageProposalVote, VoteType, +}; use crate::display_line; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc_proto::google::protobuf::Any; @@ -38,8 +43,8 @@ use crate::sdk::rpc::{ use crate::sdk::tx::{ TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_INIT_VALIDATOR_WASM, TX_REVEAL_PK, TX_TRANSFER_WASM, - TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, - VP_USER_WASM, + TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, VP_USER_WASM, }; pub use crate::sdk::wallet::store::AddressVpType; use crate::sdk::wallet::{Wallet, WalletUtils}; @@ -603,11 +608,20 @@ fn make_ledger_amount_addr( prefix: &str, ) { if let Some(token) = tokens.get(token) { - output.push(format!("{}Amount {}: {}", prefix, token, amount)); + output.push(format!( + "{}Amount : {} {}", + prefix, + token.to_uppercase(), + to_ledger_decimal(&amount.to_string()), + )); } else { output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, amount), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&amount.to_string()) + ), ]); } } @@ -632,21 +646,31 @@ async fn make_ledger_amount_asset< format_denominated_amount::<_, IO>(client, token, amount.into()) .await; if let Some(token) = tokens.get(token) { - output - .push( - format!("{}Amount: {} {}", prefix, token, formatted_amt,), - ); + output.push(format!( + "{}Amount : {} {}", + prefix, + token.to_uppercase(), + 
to_ledger_decimal(&formatted_amt), + )); } else { output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, formatted_amt), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&formatted_amt) + ), ]); } } else { // Otherwise display the raw AssetTypes output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, amount), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&amount.to_string()) + ), ]); } } @@ -654,54 +678,44 @@ async fn make_ledger_amount_asset< /// Split the lines in the vector that are longer than the Ledger device's /// character width fn format_outputs(output: &mut Vec) { - const LEDGER_WIDTH: usize = 60; + const MAX_KEY_LEN: usize = 39; + const MAX_VALUE_LEN: usize = 39; let mut i = 0; let mut pos = 0; // Break down each line that is too long one-by-one while pos < output.len() { - let prefix_len = i.to_string().len() + 3; let curr_line = output[pos].clone(); - if curr_line.len() + prefix_len < LEDGER_WIDTH { + let (key, mut value) = + curr_line.split_once(':').unwrap_or(("", &curr_line)); + // Truncate the key length to the declared maximum + let key = key.trim().chars().take(MAX_KEY_LEN - 1).collect::(); + // Trim value because we will insert spaces later + value = value.trim(); + if value.is_empty() { + value = "(none)" + } + if value.chars().count() < MAX_VALUE_LEN { // No need to split the line in this case - output[pos] = format!("{} | {}", i, curr_line); + output[pos] = format!("{} | {} : {}", i, key, value); pos += 1; } else { // Line is too long so split it up. 
Repeat the key on each line - let (mut key, mut value) = - curr_line.split_once(':').unwrap_or(("", &curr_line)); - key = key.trim(); - value = value.trim(); - if value.is_empty() { - value = "(none)" - } - - // First comput how many lines we will break the current one up into - let mut digits = 1; - let mut line_space; - let mut lines; - loop { - let prefix_len = prefix_len + 7 + 2 * digits + key.len(); - line_space = LEDGER_WIDTH - prefix_len; - lines = (value.len() + line_space - 1) / line_space; - if lines.to_string().len() <= digits { - break; - } else { - digits += 1; - } - } - - // Then break up this line according to the above plan output.remove(pos); - for (idx, part) in - value.chars().chunks(line_space).into_iter().enumerate() + let part_count = (value.chars().count() + MAX_VALUE_LEN - 2) + / (MAX_VALUE_LEN - 1); + for (idx, part) in value + .chars() + .chunks(MAX_VALUE_LEN - 1) + .into_iter() + .enumerate() { let line = format!( "{} | {} [{}/{}] : {}", i, key, idx + 1, - lines, + part_count, part.collect::(), ); output.insert(pos, line); @@ -847,6 +861,64 @@ pub async fn generate_test_vector< Ok(()) } +/// Convert decimal numbers into the format used by Ledger. Specifically remove +/// all insignificant zeros occuring after decimal point. +fn to_ledger_decimal(amount: &str) -> String { + if amount.contains('.') { + let mut amount = amount.trim_end_matches('0').to_string(); + if amount.ends_with('.') { + amount.push('0') + } + amount + } else { + amount.to_string() + ".0" + } +} + +/// A ProposalVote wrapper that prints the spending cap with Ledger decimal +/// formatting. 
+struct LedgerProposalVote<'a>(&'a StorageProposalVote); + +impl<'a> Display for LedgerProposalVote<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self.0 { + StorageProposalVote::Yay(vote_type) => match vote_type { + VoteType::Default => write!(f, "yay"), + VoteType::PGFSteward => write!(f, "yay for PGF steward"), + VoteType::PGFPayment => { + write!(f, "yay for PGF payment proposal") + } + }, + + StorageProposalVote::Nay => write!(f, "nay"), + } + } +} + +/// A ProposalType wrapper that prints the hash of the contained WASM code if it +/// is present. +struct LedgerProposalType<'a>(&'a ProposalType, &'a Tx); + +impl<'a> Display for LedgerProposalType<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self.0 { + ProposalType::Default(None) => write!(f, "Default"), + ProposalType::Default(Some(hash)) => { + let extra = self + .1 + .get_section(hash) + .and_then(|x| Section::extra_data_sec(x.as_ref())) + .expect("unable to load vp code") + .code + .hash(); + write!(f, "{}", HEXLOWER.encode(&extra.0)) + } + ProposalType::PGFSteward(_) => write!(f, "PGF Steward"), + ProposalType::PGFPayment(_) => write!(f, "PGF Payment"), + } + } +} + /// Converts the given transaction to the form that is displayed on the Ledger /// device pub async fn to_ledger_vector< @@ -882,18 +954,14 @@ pub async fn to_ledger_vector< query_wasm_code_hash::<_, IO>(client, TX_CHANGE_COMMISSION_WASM) .await?; let user_hash = query_wasm_code_hash::<_, IO>(client, VP_USER_WASM).await?; + let unjail_validator_hash = + query_wasm_code_hash::<_, IO>(client, TX_UNJAIL_VALIDATOR_WASM).await?; // To facilitate lookups of human-readable token names let tokens: HashMap = wallet - .get_addresses_with_vp_type(AddressVpType::Token) + .get_addresses() .into_iter() - .map(|addr| { - let alias = match wallet.find_alias(&addr) { - Some(alias) => alias.to_string(), - None => addr.to_string(), - }; - (addr, alias) - }) + .map(|(alias, addr)| (addr, 
alias)) .collect(); let mut tv = LedgerVector { @@ -902,7 +970,7 @@ pub async fn to_ledger_vector< })?), index: 0, valid: true, - name: "Custom 0".to_string(), + name: "Custom_0".to_string(), ..Default::default() }; @@ -928,7 +996,7 @@ pub async fn to_ledger_vector< .map_err(|err| { Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Account 0".to_string(); + tv.name = "Init_Account_0".to_string(); let extra = tx .get_section(&init_account.vp_code_hash) @@ -941,15 +1009,26 @@ pub async fn to_ledger_vector< } else { HEXLOWER.encode(&extra.0) }; - + tv.output.extend(vec![format!("Type : Init Account")]); + tv.output.extend( + init_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); tv.output.extend(vec![ - format!("Type : Init Account"), - format!("Public key : {:?}", init_account.public_keys), + format!("Threshold : {}", init_account.threshold), format!("VP type : {}", vp_code), ]); + tv.output_expert.extend( + init_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); tv.output_expert.extend(vec![ - format!("Public key : {:?}", init_account.public_keys), + format!("Threshold : {}", init_account.threshold), format!("VP type : {}", HEXLOWER.encode(&extra.0)), ]); } else if code_hash == init_validator_hash { @@ -961,7 +1040,7 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Validator 0".to_string(); + tv.name = "Init_Validator_0".to_string(); let extra = tx .get_section(&init_validator.validator_vp_code_hash) @@ -975,10 +1054,18 @@ pub async fn to_ledger_vector< HEXLOWER.encode(&extra.0) }; + tv.output.extend(vec!["Type : Init Validator".to_string()]); + tv.output.extend( + init_validator + .account_keys + .iter() + .map(|k| format!("Account key : {}", k)), + ); tv.output.extend(vec![ - format!("Type : Init Validator"), - format!("Account key : {:?}", init_validator.account_keys), + format!("Threshold : {}", 
init_validator.threshold), format!("Consensus key : {}", init_validator.consensus_key), + format!("Ethereum cold key : {}", init_validator.eth_cold_key), + format!("Ethereum hot key : {}", init_validator.eth_hot_key), format!("Protocol key : {}", init_validator.protocol_key), format!("DKG key : {}", init_validator.dkg_key), format!("Commission rate : {}", init_validator.commission_rate), @@ -989,9 +1076,17 @@ pub async fn to_ledger_vector< format!("Validator VP type : {}", vp_code,), ]); + tv.output_expert.extend( + init_validator + .account_keys + .iter() + .map(|k| format!("Account key : {}", k)), + ); tv.output_expert.extend(vec![ - format!("Account key : {:?}", init_validator.account_keys), + format!("Threshold : {}", init_validator.threshold), format!("Consensus key : {}", init_validator.consensus_key), + format!("Ethereum cold key : {}", init_validator.eth_cold_key), + format!("Ethereum hot key : {}", init_validator.eth_hot_key), format!("Protocol key : {}", init_validator.protocol_key), format!("DKG key : {}", init_validator.dkg_key), format!("Commission rate : {}", init_validator.commission_rate), @@ -1010,16 +1105,24 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Proposal 0".to_string(); + tv.name = "Init_Proposal_0".to_string(); + + let extra = tx + .get_section(&init_proposal_data.content) + .and_then(|x| Section::extra_data_sec(x.as_ref())) + .expect("unable to load vp code") + .code + .hash(); - let init_proposal_data_id = init_proposal_data - .id - .as_ref() - .map(u64::to_string) - .unwrap_or_else(|| "(none)".to_string()); + tv.output.push("Type : Init proposal".to_string()); + if let Some(id) = init_proposal_data.id.as_ref() { + tv.output.push(format!("ID : {}", id)); + } tv.output.extend(vec![ - format!("Type : Init proposal"), - format!("ID : {}", init_proposal_data_id), + format!( + "Proposal type : {}", + LedgerProposalType(&init_proposal_data.r#type, tx) + ), format!("Author : 
{}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1030,12 +1133,17 @@ pub async fn to_ledger_vector< init_proposal_data.voting_end_epoch ), format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); - tv.output - .push(format!("Content: {}", init_proposal_data.content)); + if let Some(id) = init_proposal_data.id.as_ref() { + tv.output_expert.push(format!("ID : {}", id)); + } tv.output_expert.extend(vec![ - format!("ID : {}", init_proposal_data_id), + format!( + "Proposal type : {}", + LedgerProposalType(&init_proposal_data.r#type, tx) + ), format!("Author : {}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1046,9 +1154,8 @@ pub async fn to_ledger_vector< init_proposal_data.voting_end_epoch ), format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); - tv.output - .push(format!("Content: {}", init_proposal_data.content)); } else if code_hash == vote_proposal_hash { let vote_proposal = VoteProposalData::try_from_slice( &tx.data() @@ -1058,26 +1165,26 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Vote Proposal 0".to_string(); + tv.name = "Vote_Proposal_0".to_string(); tv.output.extend(vec![ format!("Type : Vote Proposal"), format!("ID : {}", vote_proposal.id), - format!("Vote : {}", vote_proposal.vote), + format!("Vote : {}", LedgerProposalVote(&vote_proposal.vote)), format!("Voter : {}", vote_proposal.voter), ]); for delegation in &vote_proposal.delegations { - tv.output.push(format!("Delegations : {}", delegation)); + tv.output.push(format!("Delegation : {}", delegation)); } tv.output_expert.extend(vec![ format!("ID : {}", vote_proposal.id), - format!("Vote : {}", vote_proposal.vote), + format!("Vote : {}", LedgerProposalVote(&vote_proposal.vote)), format!("Voter : {}", vote_proposal.voter), ]); for delegation in vote_proposal.delegations { 
tv.output_expert - .push(format!("Delegations : {}", delegation)); + .push(format!("Delegation : {}", delegation)); } } else if code_hash == reveal_pk_hash { let public_key = common::PublicKey::try_from_slice( @@ -1088,17 +1195,17 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Account 0".to_string(); + tv.name = "Reveal_Pubkey_0".to_string(); tv.output.extend(vec![ - format!("Type : Reveal PK"), + format!("Type : Reveal Pubkey"), format!("Public key : {}", public_key), ]); tv.output_expert .extend(vec![format!("Public key : {}", public_key)]); } else if code_hash == update_account_hash { - let transfer = UpdateAccount::try_from_slice( + let update_account = UpdateAccount::try_from_slice( &tx.data() .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) @@ -1106,9 +1213,9 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Update VP 0".to_string(); + tv.name = "Update_VP_0".to_string(); - match &transfer.vp_code_hash { + match &update_account.vp_code_hash { Some(hash) => { let extra = tx .get_section(hash) @@ -1125,14 +1232,40 @@ pub async fn to_ledger_vector< }; tv.output.extend(vec![ format!("Type : Update VP"), - format!("Address : {}", transfer.addr), - format!("VP type : {}", vp_code), - ]); - - tv.output_expert.extend(vec![ - format!("Address : {}", transfer.addr), - format!("VP type : {}", HEXLOWER.encode(&extra.0)), + format!("Address : {}", update_account.addr), ]); + tv.output.extend( + update_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); + if update_account.threshold.is_some() { + tv.output.extend(vec![format!( + "Threshold : {}", + update_account.threshold.unwrap() + )]) + } + tv.output.extend(vec![format!("VP type : {}", vp_code)]); + + tv.output_expert + .extend(vec![format!("Address : {}", update_account.addr)]); + tv.output_expert.extend( + update_account + .public_keys + .iter() + .map(|k| 
format!("Public key : {}", k)), + ); + if update_account.threshold.is_some() { + tv.output_expert.extend(vec![format!( + "Threshold : {}", + update_account.threshold.unwrap() + )]) + } + tv.output_expert.extend(vec![format!( + "VP type : {}", + HEXLOWER.encode(&extra.0) + )]); } None => (), }; @@ -1171,7 +1304,7 @@ pub async fn to_ledger_vector< None }; - tv.name = "Transfer 0".to_string(); + tv.name = "Transfer_0".to_string(); tv.output.push("Type : Transfer".to_string()); make_ledger_masp_endpoints::<_, IO>( @@ -1200,7 +1333,7 @@ pub async fn to_ledger_vector< ) .map_err(|x| Error::from(EncodingError::Conversion(x.to_string())))?; - tv.name = "IBC 0".to_string(); + tv.name = "IBC_0".to_string(); tv.output.push("Type : IBC".to_string()); match MsgTransfer::try_from(any_msg.clone()) { @@ -1222,7 +1355,11 @@ pub async fn to_ledger_vector< ), format!( "Timeout timestamp : {}", - transfer.timeout_timestamp_on_b + transfer + .timeout_timestamp_on_b + .into_tm_time() + .map_or("(none)".to_string(), |time| time + .to_rfc3339()) ), ]); tv.output_expert.extend(vec![ @@ -1237,7 +1374,11 @@ pub async fn to_ledger_vector< ), format!( "Timeout timestamp : {}", - transfer.timeout_timestamp_on_b + transfer + .timeout_timestamp_on_b + .into_tm_time() + .map_or("(none)".to_string(), |time| time + .to_rfc3339()) ), ]); } @@ -1258,24 +1399,29 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Bond 0".to_string(); + tv.name = "Bond_0".to_string(); - let bond_source = bond - .source - .as_ref() - .map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); + tv.output.push("Type : Bond".to_string()); + if let Some(source) = bond.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } tv.output.extend(vec![ - format!("Type : Bond"), - format!("Source : {}", bond_source), format!("Validator : {}", bond.validator), - format!("Amount : {}", bond.amount.to_string_native()), + format!( + "Amount : NAM {}", + 
to_ledger_decimal(&bond.amount.to_string_native()) + ), ]); + if let Some(source) = bond.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } tv.output_expert.extend(vec![ - format!("Source : {}", bond_source), format!("Validator : {}", bond.validator), - format!("Amount : {}", bond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&bond.amount.to_string_native()) + ), ]); } else if code_hash == unbond_hash { let unbond = pos::Unbond::try_from_slice( @@ -1286,24 +1432,29 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Unbond 0".to_string(); + tv.name = "Unbond_0".to_string(); - let unbond_source = unbond - .source - .as_ref() - .map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); + tv.output.push("Type : Unbond".to_string()); + if let Some(source) = unbond.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } tv.output.extend(vec![ - format!("Code : Unbond"), - format!("Source : {}", unbond_source), format!("Validator : {}", unbond.validator), - format!("Amount : {}", unbond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&unbond.amount.to_string_native()) + ), ]); + if let Some(source) = unbond.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } tv.output_expert.extend(vec![ - format!("Source : {}", unbond_source), format!("Validator : {}", unbond.validator), - format!("Amount : {}", unbond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&unbond.amount.to_string_native()) + ), ]); } else if code_hash == withdraw_hash { let withdraw = pos::Withdraw::try_from_slice( @@ -1314,23 +1465,20 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Withdraw 0".to_string(); + tv.name = "Withdraw_0".to_string(); - let withdraw_source = withdraw - .source - .as_ref() - 
.map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); - tv.output.extend(vec![ - format!("Type : Withdraw"), - format!("Source : {}", withdraw_source), - format!("Validator : {}", withdraw.validator), - ]); + tv.output.push("Type : Withdraw".to_string()); + if let Some(source) = withdraw.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } + tv.output + .push(format!("Validator : {}", withdraw.validator)); - tv.output_expert.extend(vec![ - format!("Source : {}", withdraw_source), - format!("Validator : {}", withdraw.validator), - ]); + if let Some(source) = withdraw.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } + tv.output_expert + .push(format!("Validator : {}", withdraw.validator)); } else if code_hash == change_commission_hash { let commission_change = pos::CommissionChange::try_from_slice( &tx.data() @@ -1340,7 +1488,7 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Change Commission 0".to_string(); + tv.name = "Change_Commission_0".to_string(); tv.output.extend(vec![ format!("Type : Change commission"), @@ -1352,6 +1500,26 @@ pub async fn to_ledger_vector< format!("New rate : {}", commission_change.new_rate), format!("Validator : {}", commission_change.validator), ]); + } else if code_hash == unjail_validator_hash { + let address = Address::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + .map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Unjail_Validator_0".to_string(); + + tv.output.extend(vec![ + format!("Type : Unjail Validator"), + format!("Validator : {}", address), + ]); + + tv.output_expert.push(format!("Validator : {}", address)); + } else { + tv.name = "Custom_0".to_string(); + tv.output.push("Type : Custom".to_string()); } if let Some(wrapper) = tx.header.wrapper() { @@ -1370,21 +1538,21 @@ pub async fn to_ledger_vector< .await; 
tv.output_expert.extend(vec![ format!("Timestamp : {}", tx.header.timestamp.0), - format!("PK : {}", wrapper.pk), + format!("Pubkey : {}", wrapper.pk), format!("Epoch : {}", wrapper.epoch), format!("Gas limit : {}", gas_limit), - format!("Fee token : {}", gas_token), ]); if let Some(token) = tokens.get(&wrapper.fee.token) { tv.output_expert.push(format!( - "Fee amount per gas unit : {} {}", - token, fee_amount_per_gas_unit + "Fees/gas unit : {} {}", + token.to_uppercase(), + to_ledger_decimal(&fee_amount_per_gas_unit), )); } else { - tv.output_expert.push(format!( - "Fee amount per gas unit : {}", - fee_amount_per_gas_unit - )); + tv.output_expert.extend(vec![ + format!("Fee token : {}", gas_token), + format!("Fees/gas unit : {}", fee_amount_per_gas_unit), + ]); } } diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..b5489aaddc 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -70,6 +70,8 @@ use crate::{display_line, edisplay_line, vm}; pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; /// Initialize validator transaction WASM path pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; +/// Unjail validator transaction WASM path +pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; /// Initialize proposal transaction WASM path pub const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; /// Vote transaction WASM path @@ -738,13 +740,8 @@ pub async fn build_unjail_validator< let validator_state_at_pipeline = rpc::get_validator_state(client, &validator, Some(pipeline_epoch)) - .await? 
- .ok_or_else(|| { - Error::from(TxError::Other( - "Validator state should be defined.".to_string(), - )) - })?; - if validator_state_at_pipeline != ValidatorState::Jailed { + .await?; + if validator_state_at_pipeline != Some(ValidatorState::Jailed) { edisplay_line!( IO, "The given validator address {} is not jailed at the pipeline \ From 4d0d333d55d9e26f06a287359332756c34e3afdd Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Fri, 22 Sep 2023 17:30:36 +0200 Subject: [PATCH 004/161] Added a changelog record. --- .../1888-ledger-test-vector-generator-fix-0.22.0.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md diff --git a/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md b/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md new file mode 100644 index 0000000000..0aeb7196d1 --- /dev/null +++ b/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md @@ -0,0 +1,2 @@ +- Updated the generation of hardware wallet test vectors to cover current + codebase ([\#1888](https://github.com/anoma/namada/pull/1888)) \ No newline at end of file From 1783a94459b0c9d7ad1654e11a7cafe5280d121d Mon Sep 17 00:00:00 2001 From: yito88 Date: Wed, 27 Sep 2023 14:49:52 +0200 Subject: [PATCH 005/161] transfer and query with trace-path --- apps/src/lib/cli.rs | 14 ++++ apps/src/lib/client/rpc.rs | 91 ++++++++++++++++++++++++-- benches/lib.rs | 1 + core/src/ledger/ibc/context/common.rs | 5 +- core/src/ledger/ibc/mod.rs | 57 +++++++++++----- core/src/ledger/ibc/storage.rs | 35 +++++++--- core/src/types/ibc.rs | 5 ++ core/src/types/storage.rs | 2 +- shared/src/ledger/native_vp/ibc/mod.rs | 4 +- shared/src/sdk/args.rs | 9 ++- shared/src/sdk/signing.rs | 1 + shared/src/sdk/tx.rs | 29 ++++---- tests/src/e2e/ibc_tests.rs | 88 +++++++++++++++++-------- tests/src/vm_host_env/mod.rs | 4 -- 14 files changed, 264 
insertions(+), 81 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 135ff1e3c5..13ed0d2e84 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2516,6 +2516,7 @@ pub mod args { use std::path::PathBuf; use std::str::FromStr; + use namada::ibc::applications::transfer::TracePath; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; pub use namada::sdk::args::*; use namada::types::address::Address; @@ -2725,6 +2726,7 @@ pub mod args { pub const TM_ADDRESS: Arg = arg("tm-address"); pub const TOKEN_OPT: ArgOpt = TOKEN.opt(); pub const TOKEN: Arg = arg("token"); + pub const TRACE_PATH: ArgOpt = arg_opt("trace-path"); pub const TRANSFER_SOURCE: Arg = arg("source"); pub const TRANSFER_TARGET: Arg = arg("target"); pub const TX_HASH: Arg = arg("tx-hash"); @@ -3504,6 +3506,7 @@ pub mod args { source: ctx.get_cached(&self.source), target: ctx.get(&self.target), token: ctx.get(&self.token), + trace_path: self.trace_path, amount: self.amount, native_token: ctx.native_token.clone(), tx_code_path: self.tx_code_path.to_path_buf(), @@ -3517,6 +3520,7 @@ pub mod args { let source = TRANSFER_SOURCE.parse(matches); let target = TRANSFER_TARGET.parse(matches); let token = TOKEN.parse(matches); + let trace_path = TRACE_PATH.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_TRANSFER_WASM); Self { @@ -3524,6 +3528,7 @@ pub mod args { source, target, token, + trace_path, amount, native_token: (), tx_code_path, @@ -3541,6 +3546,7 @@ pub mod args { to produce the signature.", )) .arg(TOKEN.def().help("The transfer token.")) + .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) } } @@ -3552,6 +3558,7 @@ pub mod args { source: ctx.get(&self.source), receiver: self.receiver, token: ctx.get(&self.token), + trace_path: self.trace_path, amount: self.amount, port_id: self.port_id, channel_id: self.channel_id, @@ 
-3569,6 +3576,7 @@ pub mod args { let source = SOURCE.parse(matches); let receiver = RECEIVER.parse(matches); let token = TOKEN.parse(matches); + let trace_path = TRACE_PATH.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let port_id = PORT_ID.parse(matches); let channel_id = CHANNEL_ID.parse(matches); @@ -3581,6 +3589,7 @@ pub mod args { source, receiver, token, + trace_path, amount, port_id, channel_id, @@ -3601,6 +3610,7 @@ pub mod args { "The receiver address on the destination chain as string.", )) .arg(TOKEN.def().help("The transfer token.")) + .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) .arg(PORT_ID.def().help("The port ID.")) .arg(CHANNEL_ID.def().help("The channel ID.")) @@ -4448,6 +4458,7 @@ pub mod args { query: self.query.to_sdk(ctx), owner: self.owner.map(|x| ctx.get_cached(&x)), token: self.token.map(|x| ctx.get(&x)), + trace_path: self.trace_path, no_conversions: self.no_conversions, } } @@ -4458,11 +4469,13 @@ pub mod args { let query = Query::parse(matches); let owner = BALANCE_OWNER.parse(matches); let token = TOKEN_OPT.parse(matches); + let trace_path = TRACE_PATH.parse(matches); let no_conversions = NO_CONVERSIONS.parse(matches); Self { query, owner, token, + trace_path, no_conversions, } } @@ -4479,6 +4492,7 @@ pub mod args { .def() .help("The token's address whose balance to query."), ) + .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg( NO_CONVERSIONS.def().help( "Whether not to automatically perform conversions.", diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..0606efd06e 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -29,6 +29,9 @@ use namada::core::ledger::governance::utils::{ use namada::core::ledger::pgf::parameters::PgfParameters; use namada::core::ledger::pgf::storage::steward::StewardDetail; use namada::ledger::events::Event; +use 
namada::ledger::ibc::storage::{ + ibc_denom_key, ibc_denom_key_prefix, ibc_token, is_ibc_denom_key, +}; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{CommissionPair, PosParams, Slash}; use namada::ledger::queries::RPC; @@ -44,7 +47,7 @@ use namada::sdk::rpc::{ TxResponse, }; use namada::sdk::wallet::{AddressVpType, Wallet}; -use namada::types::address::{masp, Address}; +use namada::types::address::{masp, Address, InternalAddress}; use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; use namada::types::io::Io; @@ -348,12 +351,24 @@ pub async fn query_transparent_balance< Address::Internal(namada::types::address::InternalAddress::Multitoken) .to_db_key(), ); - let tokens = wallet.tokens_with_aliases(); - match (args.token, args.owner) { + let token = args.token.as_ref().map(|token| { + if let Some(trace_path) = &args.trace_path { + ibc_token(format!("{}/{}", trace_path, token)) + } else { + token.clone() + } + }); + match (token, args.owner) { (Some(token), Some(owner)) => { let balance_key = token::balance_key(&token, &owner.address().unwrap()); - let token_alias = wallet.lookup_alias(&token); + let base_token_alias = + wallet.lookup_alias(&args.token.expect("No token")); + let token_alias = if let Some(trace_path) = args.trace_path { + format!("{}/{}", trace_path, base_token_alias) + } else { + base_token_alias + }; match query_storage_value::(client, &balance_key) .await { @@ -377,6 +392,9 @@ pub async fn query_transparent_balance< } (None, Some(owner)) => { let owner = owner.address().unwrap(); + let tokens = + query_tokens::<_, IO>(client, &wallet, Some(&owner)).await; + println!("DEBUG: tokens {:?}", tokens); for (token_alias, token) in tokens { let balance = get_token_balance(client, &token, &owner).await; if !balance.is_zero() { @@ -416,6 +434,66 @@ pub async fn query_transparent_balance< } } +async fn get_token_alias( + client: &C, + wallet: &Wallet, + token: &Address, + owner: 
&Address, +) -> String { + if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { + let ibc_denom_key = ibc_denom_key(owner, trace_hash); + match query_storage_value::(client, &ibc_denom_key).await { + Ok(ibc_denom) => get_ibc_denom_alias(wallet, ibc_denom), + Err(_) => token.to_string(), + } + } else { + wallet.lookup_alias(token) + } +} + +async fn query_tokens( + client: &C, + wallet: &Wallet, + owner: Option<&Address>, +) -> BTreeMap { + // Base tokens + let mut tokens = wallet.tokens_with_aliases(); + + let prefix = ibc_denom_key_prefix(owner); + let ibc_denoms = + query_storage_prefix::(client, &prefix).await; + if let Some(ibc_denoms) = ibc_denoms { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, hash)) = is_ibc_denom_key(&key) { + let ibc_denom_alias = get_ibc_denom_alias(wallet, ibc_denom); + let ibc_token = + Address::Internal(InternalAddress::IbcToken(hash)); + tokens.insert(ibc_denom_alias, ibc_token); + } + } + } + tokens +} + +fn get_ibc_denom_alias( + wallet: &Wallet, + ibc_denom: impl AsRef, +) -> String { + let (trace_path, base_denom) = ibc_denom + .as_ref() + .rsplit_once('/') + .unwrap_or(("", ibc_denom.as_ref())); + let token_alias = match Address::decode(&base_denom) { + Ok(token) => wallet.lookup_alias(&token), + Err(_) => base_denom.to_string(), + }; + if trace_path.is_empty() { + token_alias + } else { + format!("{}/{}", trace_path, token_alias) + } +} + /// Query the token pinned balance(s) pub async fn query_pinned_balance< C: namada::ledger::queries::Client + Sync, @@ -606,6 +684,7 @@ async fn print_balances( ), None => continue, }; + let token_alias = get_token_alias(client, wallet, &t, &o).await; // Get the token and the balance let (t, s) = match (token, target) { // the given token and the given target are the same as the @@ -628,7 +707,6 @@ async fn print_balances( // the token has been already printed } _ => { - let token_alias = wallet.lookup_alias(&t); display_line!(IO, &mut w; "Token {}", 
token_alias).unwrap(); print_token = Some(t); } @@ -768,7 +846,8 @@ pub async fn query_shielded_balance< .expect("context should contain viewing key") }; - let token_alias = wallet.lookup_alias(&token); + let token_alias = + get_token_alias(client, wallet, &token, &masp()).await; let total_balance = balance .get(&(epoch, token.clone())) diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..30765d8beb 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -783,6 +783,7 @@ impl BenchShieldedCtx { source: source.clone(), target: target.clone(), token: address::nam(), + trace_path: None, amount: InputAmount::Validated(DenominatedAmount { amount, denom: 0.into(), diff --git a/core/src/ledger/ibc/context/common.rs b/core/src/ledger/ibc/context/common.rs index 5e963e7a5f..b5f0326767 100644 --- a/core/src/ledger/ibc/context/common.rs +++ b/core/src/ledger/ibc/context/common.rs @@ -361,15 +361,16 @@ pub trait IbcCommonContext: IbcStorageContext { /// Write the IBC denom fn store_ibc_denom( &mut self, + receiver: &Address, trace_hash: impl AsRef, denom: impl AsRef, ) -> Result<(), ContextError> { - let key = storage::ibc_denom_key(trace_hash.as_ref()); + let key = storage::ibc_denom_key(receiver, trace_hash.as_ref()); let has_key = self.has_key(&key).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( "Reading the IBC denom failed: Key {}", - key + key, ), }) })?; diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index fcabcee745..4cc86277f8 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -26,7 +26,9 @@ use crate::ibc::core::ics24_host::identifier::{ChainId as IbcChainId, PortId}; use crate::ibc::core::router::{Module, ModuleId, Router}; use crate::ibc::core::{execute, validate, MsgEnvelope, RouterError}; use crate::ibc_proto::google::protobuf::Any; +use crate::types::address::Address; use crate::types::chain::ChainId; +use crate::types::ibc::{EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET}; 
#[allow(missing_docs)] #[derive(Error, Debug)] @@ -135,20 +137,8 @@ where fn store_denom(&mut self, envelope: MsgEnvelope) -> Result<(), Error> { match envelope { MsgEnvelope::Packet(PacketMsg::Recv(_)) => { - let result = self - .ctx - .borrow() - .get_ibc_event("denomination_trace") - .map_err(|_| { - Error::Denom("Reading the IBC event failed".to_string()) - })?; - if let Some((trace_hash, ibc_denom)) = - result.as_ref().and_then(|event| { - event - .attributes - .get("trace_hash") - .zip(event.attributes.get("denom")) - }) + if let Some((trace_hash, ibc_denom, receiver)) = + self.get_minted_token_info()? { // If the denomination trace event has the trace hash and // the IBC denom, a token has been minted. The raw IBC denom @@ -157,7 +147,7 @@ where // denomination is also set for the minting. self.ctx .borrow_mut() - .store_ibc_denom(trace_hash, ibc_denom) + .store_ibc_denom(&receiver, trace_hash, &ibc_denom) .map_err(|e| { Error::Denom(format!( "Writing the IBC denom failed: {}", @@ -182,6 +172,43 @@ where } } + /// Get the minted IBC denom, the trace hash, and the receiver from IBC + /// events + fn get_minted_token_info( + &self, + ) -> Result, Error> { + let receive_event = + self.ctx.borrow().get_ibc_event(EVENT_TYPE_PACKET).map_err( + |_| Error::Denom("Reading the IBC event failed".to_string()), + )?; + let receiver = match receive_event + .as_ref() + .and_then(|event| event.attributes.get("receiver")) + { + Some(receiver) => { + Some(Address::decode(receiver).map_err(|_| { + Error::Denom(format!( + "Decoding the receiver address failed: {:?}", + receive_event + )) + })?) 
+ } + None => None, + }; + let denom_event = self + .ctx + .borrow() + .get_ibc_event(EVENT_TYPE_DENOM_TRACE) + .map_err(|_| { + Error::Denom("Reading the IBC event failed".to_string()) + })?; + Ok(denom_event.as_ref().and_then(|event| { + let trace_hash = event.attributes.get("trace_hash").cloned()?; + let denom = event.attributes.get("denom").cloned()?; + Some((trace_hash, denom, receiver?)) + })) + } + /// Validate according to the message in IBC VP pub fn validate(&self, tx_data: &[u8]) -> Result<(), Error> { let any_msg = Any::decode(tx_data).map_err(Error::DecodingData)?; diff --git a/core/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs index 317aa108bf..349c4b6ef3 100644 --- a/core/src/ledger/ibc/storage.rs +++ b/core/src/ledger/ibc/storage.rs @@ -367,10 +367,26 @@ pub fn port_id(key: &Key) -> Result { } } -/// The storage key to get the denom name from the hashed token -pub fn ibc_denom_key(token_hash: impl AsRef) -> Key { - let path = format!("{}/{}", DENOM, token_hash.as_ref()); - ibc_key(path).expect("Creating a key for the denom key shouldn't fail") +/// The storage key prefix to get the denom name with the hashed IBC denom +pub fn ibc_denom_key_prefix(owner: Option<&Address>) -> Key { + let prefix = Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&DENOM.to_string().to_db_key()) + .expect("Cannot obtain a storage key"); + + if let Some(owner) = owner { + prefix + .push(&owner.to_db_key()) + .expect("Cannot obtain a storage key") + } else { + prefix + } +} + +/// The storage key to get the denom name with the hashed IBC denom +pub fn ibc_denom_key(owner: &Address, token_hash: impl AsRef) -> Key { + ibc_denom_key_prefix(Some(owner)) + .push(&token_hash.as_ref().to_string().to_db_key()) + .expect("Cannot obtain a storage key") } /// Hash the denom @@ -392,20 +408,19 @@ pub fn is_ibc_key(key: &Key) -> bool { DbKeySeg::AddressSeg(addr) if *addr == Address::Internal(InternalAddress::Ibc)) } -/// Returns the token hash if the 
given key is the denom key -pub fn is_ibc_denom_key(key: &Key) -> Option { +/// Returns the owner and the token hash if the given key is the denom key +pub fn is_ibc_denom_key(key: &Key) -> Option<(Address, String)> { match &key.segments[..] { [ DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(Address::Internal(InternalAddress::IbcToken( - hash, - ))), + DbKeySeg::AddressSeg(owner), + DbKeySeg::StringSeg(hash), ] => { if addr == &Address::Internal(InternalAddress::Ibc) && prefix == DENOM { - Some(hash.clone()) + Some((owner.clone(), hash.clone())) } else { None } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 7a412ecb05..32a93218eb 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -5,6 +5,11 @@ use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +/// The event type defined in ibc-rs for receiving a token +pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; +/// The event type defined in ibc-rs for IBC denom +pub const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; + /// Wrapped IbcEvent #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index ad0c14f499..7dcfd14726 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -26,7 +26,7 @@ use crate::types::keccak::{KeccakHash, TryFromError}; use crate::types::time::DateTimeUtc; /// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage -pub const IBC_KEY_LIMIT: usize = 120; +pub const IBC_KEY_LIMIT: usize = 240; #[allow(missing_docs)] #[derive(Error, Debug, Clone)] diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 3b6521905b..c50ee89600 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -169,7 +169,7 @@ where fn validate_denom(&self, keys_changed: &BTreeSet) -> 
VpResult<()> { for key in keys_changed { - if let Some(hash) = is_ibc_denom_key(key) { + if let Some((_, hash)) = is_ibc_denom_key(key) { match self.ctx.read_post::(key).map_err(|e| { Error::Denom(format!( "Getting the denom failed: Key {}, Error {}", @@ -2191,7 +2191,7 @@ mod tests { packet.chan_id_on_b.clone(), )); let trace_hash = calc_hash(coin.denom.to_string()); - let denom_key = ibc_denom_key(&trace_hash); + let denom_key = ibc_denom_key(&receiver, &trace_hash); let bytes = coin.denom.to_string().try_to_vec().unwrap(); wl_storage .write_log diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index b765dece5a..3c31b17ba7 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -11,6 +11,7 @@ use namada_core::types::time::DateTimeUtc; use serde::{Deserialize, Serialize}; use zeroize::Zeroizing; +use crate::ibc::applications::transfer::TracePath; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::types::address::Address; use crate::types::keccak::KeccakHash; @@ -135,6 +136,8 @@ pub struct TxTransfer { pub target: C::TransferTarget, /// Transferred token address pub token: C::Address, + /// Transferred token's trace path + pub trace_path: Option, /// Transferred token amount pub amount: InputAmount, /// Native token address @@ -163,8 +166,10 @@ pub struct TxIbcTransfer { pub source: C::Address, /// Transfer target address pub receiver: String, - /// Transferred token addres s + /// Transferred token address pub token: C::Address, + /// Transferred token's trace path + pub trace_path: Option, /// Transferred token amount pub amount: InputAmount, /// Port ID @@ -391,6 +396,8 @@ pub struct QueryBalance { pub owner: Option, /// Address of a token pub token: Option, + /// Transferred token's trace path + pub trace_path: Option, /// Whether not to convert balances pub no_conversions: bool, } diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..9f1d541faf 100644 --- 
a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -426,6 +426,7 @@ pub async fn wrap_tx< fee_payer_address.clone(), ), token: args.fee_token.clone(), + trace_path: None, amount: args::InputAmount::Validated(DenominatedAmount { // NOTE: must unshield the total fee amount, not the // diff, because the ledger evaluates the transaction in diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..5b477de47e 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -23,7 +23,7 @@ use namada_core::ledger::governance::cli::onchain::{ use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; use namada_core::ledger::pgf::cli::steward::Commission; -use namada_core::types::address::{masp, Address, InternalAddress}; +use namada_core::types::address::{masp, Address}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; use namada_core::types::token::MaspDenom; @@ -36,12 +36,12 @@ use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::packet::PacketData; -use crate::ibc::applications::transfer::PrefixedCoin; +use crate::ibc::applications::transfer::{PrefixedCoin, PrefixedDenom}; use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; -use crate::ledger::ibc::storage::ibc_denom_key; +use crate::ledger::ibc::storage::ibc_token; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; @@ -1414,17 +1414,16 @@ pub async fn build_ibc_transfer< .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; - let ibc_denom = match &args.token { - Address::Internal(InternalAddress::IbcToken(hash)) => { - 
let ibc_denom_key = ibc_denom_key(hash); - rpc::query_storage_value::(client, &ibc_denom_key) - .await - .map_err(|_e| TxError::TokenDoesNotExist(args.token.clone()))? - } - _ => args.token.to_string(), + let ibc_denom = PrefixedDenom { + trace_path: args.trace_path.unwrap_or_default(), + base_denom: args + .token + .to_string() + .parse() + .expect("Conversion from the token shouldn't fail"), }; let token = PrefixedCoin { - denom: ibc_denom.parse().expect("Invalid IBC denom"), + denom: ibc_denom, // Set the IBC amount as an integer amount: validated_amount.into(), }; @@ -1667,7 +1666,11 @@ pub async fn build_transfer< ) -> Result<(Tx, Option)> { let source = args.source.effective_address(); let target = args.target.effective_address(); - let token = args.token.clone(); + let token = if let Some(trace_path) = &args.trace_path { + ibc_token(format!("{}/{}", trace_path.clone(), args.token)) + } else { + args.token.clone() + }; // Check that the source address exists on chain source_exists_or_err::<_, IO>(source.clone(), args.tx.force, client) diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index c294367f1c..a539efaae8 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -637,6 +637,7 @@ fn transfer_token( ALBERT, &receiver, NAM, + None, "100000", ALBERT_KEY, port_id_a, @@ -706,6 +707,7 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, + None, "10.1", ALBERT_KEY, port_id_a, @@ -721,6 +723,7 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, + None, "10", ALBERT_KEY, &"port".parse().unwrap(), @@ -736,6 +739,7 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, + None, "10", ALBERT_KEY, port_id_a, @@ -753,10 +757,8 @@ fn transfer_received_token( channel_id: &ChannelId, test: &Test, ) -> Result<()> { - let nam = find_address(test, NAM)?; // token received via the port and channel - let denom = format!("{port_id}/{channel_id}/{nam}"); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = 
format!("{port_id}/{channel_id}"); let rpc = get_actor_rpc(test, &Who::Validator(0)); let amount = Amount::native_whole(50000).to_string_native(); @@ -767,7 +769,9 @@ fn transfer_received_token( "--target", ALBERT, "--token", - &ibc_token, + NAM, + "--trace-path", + &trace_path, "--amount", &amount, "--gas-token", @@ -791,18 +795,17 @@ fn transfer_back( port_id_b: &PortId, channel_id_b: &ChannelId, ) -> Result<()> { - let token = find_address(test_b, NAM)?.to_string(); let receiver = find_address(test_a, ALBERT)?; // Chain A was the source for the sent token - let denom_raw = format!("{}/{}/{}", port_id_b, channel_id_b, token); - let ibc_token = ibc_token(denom_raw).to_string(); + let trace_path = format!("{}/{}", port_id_b, channel_id_b); // Send a token from Chain B let height = transfer( test_b, BERTHA, &receiver, - ibc_token, + NAM, + Some(&trace_path), "50000", BERTHA_KEY, port_id_b, @@ -866,6 +869,7 @@ fn transfer_timeout( ALBERT, &receiver, NAM, + None, "100000", ALBERT_KEY, port_id_a, @@ -995,6 +999,7 @@ fn transfer( sender: impl AsRef, receiver: &Address, token: impl AsRef, + trace_path: Option<&str>, amount: impl AsRef, signer: impl AsRef, port_id: &PortId, @@ -1028,6 +1033,11 @@ fn transfer( &rpc, ]; + if let Some(trace_path) = trace_path { + tx_args.push("--trace-path"); + tx_args.push(&trace_path.clone()); + } + let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); @@ -1211,8 +1221,6 @@ fn check_balances( test_a: &Test, test_b: &Test, ) -> Result<()> { - let token = find_address(test_a, NAM)?; - // Check the balances on Chain A let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; @@ -1229,13 +1237,20 @@ fn check_balances( client.assert_success(); // Check the balance on Chain B - let denom = format!("{}/{}/{}", &dest_port_id, &dest_channel_id, &token,); - let ibc_token = ibc_token(denom).to_string(); + 
let trace_path = format!("{}/{}", &dest_port_id, &dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc_b, + "balance", + "--owner", + BERTHA, + "--token", + NAM, + "--trace-path", + &trace_path, + "--node", + &rpc_b, ]; - let expected = format!("{}: 100000", ibc_token); + let expected = format!("{}: 100000", format!("{}/nam", trace_path)); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1249,25 +1264,39 @@ fn check_balances_after_non_ibc( test: &Test, ) -> Result<()> { // Check the balance on Chain B - let token = find_address(test, NAM)?; - let denom = format!("{}/{}/{}", port_id, channel_id, token); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = format!("{}/{}", port_id, channel_id); // Check the source let rpc = get_actor_rpc(test, &Who::Validator(0)); let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc, + "balance", + "--owner", + BERTHA, + "--token", + NAM, + "--trace-path", + &trace_path, + "--node", + &rpc, ]; - let expected = format!("{}: 50000", ibc_token); + let expected = format!("{}: 50000", format!("{}/nam", trace_path)); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); // Check the traget let query_args = vec![ - "balance", "--owner", ALBERT, "--token", &ibc_token, "--node", &rpc, + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--trace-path", + &trace_path, + "--node", + &rpc, ]; - let expected = format!("{}: 50000", ibc_token); + let expected = format!("{}: 50000", format!("{}/nam", trace_path)); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1282,8 +1311,6 @@ fn check_balances_after_back( test_a: &Test, test_b: &Test, ) -> Result<()> { - let 
token = find_address(test_b, NAM)?; - // Check the balances on Chain A let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; @@ -1300,13 +1327,20 @@ fn check_balances_after_back( client.assert_success(); // Check the balance on Chain B - let denom = format!("{}/{}/{}", dest_port_id, dest_channel_id, &token,); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = format!("{}/{}", dest_port_id, dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc_b, + "balance", + "--owner", + BERTHA, + "--token", + NAM, + "--trace-path", + &trace_path, + "--node", + &rpc_b, ]; - let expected = format!("{}: 0", ibc_token); + let expected = format!("{}: 0", format!("{}/nam", trace_path)); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 68ebd76dff..b22e9ff6b2 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -1260,10 +1260,6 @@ mod tests { .try_to_vec() .unwrap(), ); - // original denom - let hash = ibc_storage::calc_hash(&denom); - let denom_key = ibc_storage::ibc_denom_key(hash); - writes.insert(denom_key, denom.try_to_vec().unwrap()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { env.wl_storage From 4f45b8139ffff29704a9a5ed102b074a168e5cfa Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 28 Aug 2023 16:43:37 +0200 Subject: [PATCH 006/161] Inner tx signer also signs tx header --- core/src/proto/types.rs | 9 ++++++++- shared/src/vm/host_env.rs | 2 +- vp_prelude/src/lib.rs | 9 ++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index a6082fbbab..5b03c6c7ef 100644 --- a/core/src/proto/types.rs +++ 
b/core/src/proto/types.rs @@ -1764,8 +1764,15 @@ impl Tx { account_public_keys_map: AccountPublicKeysMap, signer: Option
, ) -> &mut Self { + // The inner tx signer signs the Raw version of the Header + let mut header = self.header(); + header.tx_type = TxType::Raw; + + let mut hashes = vec![Section::Header(header).get_hash()]; self.protocol_filter(); - let hashes = self.inner_section_targets(); + let sections_hashes = self.inner_section_targets(); + hashes.extend(sections_hashes); + self.add_section(Section::Signature(Signature::new( hashes, account_public_keys_map.index_secret_keys(keypairs), diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..3e3b78e31b 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1809,7 +1809,7 @@ where let gas_meter = unsafe { env.ctx.gas_meter.get() }; vp_host_fns::add_gas(gas_meter, gas)?; - let hashes = <[Hash; 2]>::try_from_slice(&hash_list) + let hashes = <[Hash; 3]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let (public_keys_map, gas) = env diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 0962628363..162d26b9dd 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -30,6 +30,7 @@ use namada_core::types::internal::HostEnvResult; use namada_core::types::storage::{ BlockHash, BlockHeight, Epoch, Header, TxIndex, BLOCK_HASH_LENGTH, }; +use namada_core::types::transaction::TxType; pub use namada_core::types::*; pub use namada_macros::validity_predicate; pub use namada_proof_of_stake::storage as proof_of_stake; @@ -88,7 +89,13 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let threshold = storage_api::account::threshold(&ctx.pre(), owner)?.unwrap_or(1); - let targets = [*tx.data_sechash(), *tx.code_sechash()]; + let mut header = tx.header(); + header.tx_type = TxType::Raw; + let targets = [ + Section::Header(header).get_hash(), + *tx.data_sechash(), + *tx.code_sechash(), + ]; // Serialize parameters let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); From 
ee31f471d6b9da98d61f33bb6aebebf6f0f0b04c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 28 Aug 2023 18:44:32 +0200 Subject: [PATCH 007/161] Adds `raw_header_hash` method for `Tx` --- .../lib/node/ledger/shell/finalize_block.rs | 159 +++++++----------- apps/src/lib/node/ledger/shell/mod.rs | 23 +-- .../lib/node/ledger/shell/prepare_proposal.rs | 3 +- .../lib/node/ledger/shell/process_proposal.rs | 44 ++--- core/src/proto/types.rs | 13 +- shared/src/ledger/protocol/mod.rs | 4 +- vp_prelude/src/lib.rs | 9 +- 7 files changed, 103 insertions(+), 152 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..14c24addce 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -213,9 +213,7 @@ where .pop() .expect("Missing wrapper tx in queue") .tx - .clone() - .update_header(TxType::Raw) - .header_hash(); + .raw_header_hash(); let tx_hash_key = replay_protection::get_replay_protection_key(&tx_hash); self.wl_storage @@ -276,7 +274,7 @@ where continue; } - let (mut tx_event, tx_unsigned_hash, mut tx_gas_meter, wrapper) = + let (mut tx_event, tx_header_hash, mut tx_gas_meter, wrapper) = match &tx_header.tx_type { TxType::Wrapper(wrapper) => { stats.increment_wrapper_txs(); @@ -286,7 +284,7 @@ where } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - let mut tx_in_queue = self + let tx_in_queue = self .wl_storage .storage .tx_queue @@ -323,12 +321,7 @@ where ( event, - Some( - tx_in_queue - .tx - .update_header(TxType::Raw) - .header_hash(), - ), + Some(tx_in_queue.tx.raw_header_hash()), TxGasMeter::new_from_sub_limit(tx_in_queue.gas), None, ) @@ -511,7 +504,7 @@ where // If transaction type is Decrypted and failed because of // out of gas, remove its hash from storage to allow // rewrapping it - if let Some(hash) = tx_unsigned_hash { + if let Some(hash) = tx_header_hash { if let 
Error::TxApply(protocol::Error::GasError(_)) = msg { @@ -2081,11 +2074,9 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap()); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. Make val2 the next block's @@ -2095,11 +2086,9 @@ mod test_finalize_block { assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!( - !rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(!rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap()); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal let acc_sum = get_rewards_sum(&shell.wl_storage); @@ -2213,11 +2202,9 @@ mod test_finalize_block { None, ); } - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap()); let rp1 = rewards_prod_1 .get(&shell.wl_storage, &Epoch::default()) .unwrap() @@ -2307,26 +2294,22 @@ mod test_finalize_block { assert!(shell.shell.wl_storage.has_key(&wrapper_hash_key).unwrap()); assert!(shell.shell.wl_storage.has_key(&decrypted_hash_key).unwrap()); // Check that non of the hashes is present in the merkle tree - assert!( - !shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap() - ); - assert!( - !shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&decrypted_hash_key) - .unwrap() - ); + assert!(!shell + .shell + 
.wl_storage + .storage + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap()); + assert!(!shell + .shell + .wl_storage + .storage + .block + .tree + .has_key(&decrypted_hash_key) + .unwrap()); } /// Test that if a decrypted transaction fails because of out-of-gas, its @@ -2362,7 +2345,7 @@ mod test_finalize_block { // Write inner hash in storage let inner_hash_key = replay_protection::get_replay_protection_key( - &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), + &wrapper_tx.raw_header_hash(), ); shell .wl_storage @@ -2397,12 +2380,10 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); - assert!( - !shell - .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) + assert!(!shell + .wl_storage + .has_key(&inner_hash_key) + .expect("Test failed")) } #[test] @@ -2439,7 +2420,7 @@ mod test_finalize_block { &wrapper.header_hash(), ); let inner_hash_key = replay_protection::get_replay_protection_key( - &wrapper.clone().update_header(TxType::Raw).header_hash(), + &wrapper.raw_header_hash(), ); let processed_tx = ProcessedTx { @@ -2463,18 +2444,14 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); - assert!( - shell - .wl_storage - .has_key(&wrapper_hash_key) - .expect("Test failed") - ); - assert!( - !shell - .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) + assert!(shell + .wl_storage + .has_key(&wrapper_hash_key) + .expect("Test failed")); + assert!(!shell + .wl_storage + .has_key(&inner_hash_key) + .expect("Test failed")) } // Test that if the fee payer doesn't have enough funds for fee payment the @@ -2761,11 +2738,9 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!( - enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.wl_storage)? 
- ); + assert!(enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.wl_storage)?); assert_eq!( get_num_consensus_validators(&shell.wl_storage, Epoch::default()) .unwrap(), @@ -2784,21 +2759,17 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!( - enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.wl_storage)? - ); + assert!(enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.wl_storage)?); assert_eq!( get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), 5_u64 ); } - assert!( - !enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.wl_storage)? - ); + assert!(!enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.wl_storage)?); // Advance to the processing epoch loop { @@ -2821,11 +2792,9 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!( - enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)? 
- ); + assert!(enqueued_slashes_handle() + .at(&shell.wl_storage.storage.block.epoch) + .is_empty(&shell.wl_storage)?); let stake1 = read_validator_stake( &shell.wl_storage, ¶ms, @@ -3371,15 +3340,13 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!( - namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap() - ); + assert!(namada_proof_of_stake::is_validator_frozen( + &shell.wl_storage, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap()); assert!( namada_proof_of_stake::validator_slashes_handle(&val1.address) .is_empty(&shell.wl_storage) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..bd58e06af8 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -932,8 +932,7 @@ where tx_bytes: &[u8], temp_wl_storage: &mut TempWlStorage, ) -> Result<()> { - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_tx_hash = wrapper.raw_header_hash(); let inner_hash_key = replay_protection::get_replay_protection_key(&inner_tx_hash); if temp_wl_storage @@ -1089,22 +1088,19 @@ where } }; - let tx_chain_id = tx.header.chain_id.clone(); - let tx_expiration = tx.header.expiration; - // Tx chain id - if tx_chain_id != self.chain_id { + if tx.header.chain_id != self.chain_id { response.code = ErrorCodes::InvalidChainId.into(); response.log = format!( "{INVALID_MSG}: Tx carries a wrong chain id: expected {}, \ found {}", - self.chain_id, tx_chain_id + self.chain_id, tx.header.chain_id ); return response; } // Tx expiration - if let Some(exp) = tx_expiration { + if let Some(exp) = tx.header.expiration { let last_block_timestamp = self.get_block_timestamp(None); if last_block_timestamp > exp { @@ -1263,11 +1259,11 @@ where } // Replay protection check - let mut inner_tx = tx; - inner_tx.update_header(TxType::Raw); - let inner_tx_hash = 
&inner_tx.header_hash(); + let inner_tx_hash = tx.raw_header_hash(); let inner_hash_key = - replay_protection::get_replay_protection_key(inner_tx_hash); + replay_protection::get_replay_protection_key( + &inner_tx_hash, + ); if self .wl_storage .storage @@ -2502,8 +2498,7 @@ mod tests { ) ); - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_tx_hash = wrapper.raw_header_hash(); // Write inner hash in storage let inner_hash_key = replay_protection::get_replay_protection_key(&inner_tx_hash); diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..10f9cbc6c1 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1279,8 +1279,7 @@ mod test_prepare_proposal { [(0, keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_unsigned_hash = wrapper.raw_header_hash(); // Write inner hash to storage let hash_key = diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..281bd04399 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -721,14 +721,7 @@ where metadata.has_decrypted_txs = true; match tx_queue_iter.next() { Some(wrapper) => { - let mut inner_tx = tx.clone(); - inner_tx.update_header(TxType::Raw); - if wrapper - .tx - .clone() - .update_header(TxType::Raw) - .header_hash() - != inner_tx.header_hash() + if wrapper.tx.raw_header_hash() != tx.raw_header_hash() { TxResult { code: ErrorCodes::InvalidOrder.into(), @@ -742,7 +735,9 @@ where wrapper.tx.clone(), privkey, ) { - // Tx chain id + // FIXME: remove these first 2 checks (also from + // prepare proposal if they are there and finalize + // block?) 
Tx chain id if wrapper.tx.header.chain_id != self.chain_id { return TxResult { code: ErrorCodes::InvalidDecryptedChainId @@ -1124,13 +1119,11 @@ mod test_process_proposal { shell.chain_id.clone(), ) .to_bytes(); - assert!( - shell - .process_proposal(ProcessProposal { - txs: vec![tx.clone(), tx] - }) - .is_err() - ); + assert!(shell + .process_proposal(ProcessProposal { + txs: vec![tx.clone(), tx] + }) + .is_err()); } #[cfg(feature = "abcipp")] @@ -1287,11 +1280,9 @@ mod test_process_proposal { sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); - let mut txs = vec![ - EthereumTxData::BridgePool(vote_ext.into()) - .sign(protocol_key, shell.chain_id.clone()) - .to_bytes(), - ]; + let mut txs = vec![EthereumTxData::BridgePool(vote_ext.into()) + .sign(protocol_key, shell.chain_id.clone()) + .to_bytes()]; let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), @@ -2226,10 +2217,7 @@ mod test_process_proposal { format!( "Transaction replay attempt: Inner transaction hash \ {} already in storage", - wrapper - .clone() - .update_header(TxType::Raw) - .header_hash(), + wrapper.raw_header_hash() ) ); } @@ -2263,10 +2251,9 @@ mod test_process_proposal { [(0, keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage + let inner_unsigned_hash = wrapper.raw_header_hash(); let hash_key = replay_protection::get_replay_protection_key(&inner_unsigned_hash); shell @@ -2327,8 +2314,7 @@ mod test_process_proposal { [(0, keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_unsigned_hash = wrapper.raw_header_hash(); new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 5b03c6c7ef..b5935e504b 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -1250,6 +1250,14 @@ 
impl Tx { Section::Header(self.header.clone()).get_hash() } + /// Gets the hash of the raw transaction's header + pub fn raw_header_hash(&self) -> crate::types::hash::Hash { + let mut raw_header = self.header(); + raw_header.tx_type = TxType::Raw; + + Section::Header(raw_header).get_hash() + } + /// Get hashes of all the sections in this transaction pub fn sechashes(&self) -> Vec { let mut hashes = vec![self.header_hash()]; @@ -1765,10 +1773,7 @@ impl Tx { signer: Option
, ) -> &mut Self { // The inner tx signer signs the Raw version of the Header - let mut header = self.header(); - header.tx_type = TxType::Raw; - - let mut hashes = vec![Section::Header(header).get_hash()]; + let mut hashes = vec![self.raw_header_hash()]; self.protocol_filter(); let sections_hashes = self.inner_section_targets(); hashes.extend(sections_hashes); diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index a23b026eea..2f2af49cd7 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -231,7 +231,7 @@ where WLS: WriteLogAndStorage, { let mut changed_keys = BTreeSet::default(); - let mut tx: Tx = tx_bytes.try_into().unwrap(); + let tx: Tx = tx_bytes.try_into().unwrap(); // Writes wrapper tx hash to block write log (changes must be persisted even // in case of failure) @@ -258,7 +258,7 @@ where // If wrapper was succesful, write inner tx hash to storage let inner_hash_key = replay_protection::get_replay_protection_key( - &hash::Hash(tx.update_header(TxType::Raw).header_hash().0), + &hash::Hash(tx.raw_header_hash().0), ); shell_params .wl_storage diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 162d26b9dd..c42a29864a 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -89,13 +89,12 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let threshold = storage_api::account::threshold(&ctx.pre(), owner)?.unwrap_or(1); + // FIXME: add a test to check the invalid signature in vp of the tx header + // hash FIXME: tryo a replay attack on a local devnet let mut header = tx.header(); header.tx_type = TxType::Raw; - let targets = [ - Section::Header(header).get_hash(), - *tx.data_sechash(), - *tx.code_sechash(), - ]; + let targets = + [tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()]; // Serialize parameters let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); From 8c7bdf05f45981fb6fa39791d437e05fd36e04f3 Mon Sep 
17 00:00:00 2001 From: yito88 Date: Thu, 28 Sep 2023 00:12:56 +0200 Subject: [PATCH 008/161] query balances for IbcToken --- apps/src/lib/cli.rs | 4 - apps/src/lib/client/rpc.rs | 487 ++++++++++++++----------- core/src/ledger/ibc/context/common.rs | 6 +- core/src/ledger/ibc/mod.rs | 21 +- core/src/ledger/ibc/storage.rs | 24 +- core/src/types/ibc.rs | 15 + shared/src/ledger/native_vp/ibc/mod.rs | 9 +- shared/src/sdk/args.rs | 2 - tests/src/e2e/ibc_tests.rs | 48 +-- 9 files changed, 334 insertions(+), 282 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 13ed0d2e84..79db0b3824 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -4458,7 +4458,6 @@ pub mod args { query: self.query.to_sdk(ctx), owner: self.owner.map(|x| ctx.get_cached(&x)), token: self.token.map(|x| ctx.get(&x)), - trace_path: self.trace_path, no_conversions: self.no_conversions, } } @@ -4469,13 +4468,11 @@ pub mod args { let query = Query::parse(matches); let owner = BALANCE_OWNER.parse(matches); let token = TOKEN_OPT.parse(matches); - let trace_path = TRACE_PATH.parse(matches); let no_conversions = NO_CONVERSIONS.parse(matches); Self { query, owner, token, - trace_path, no_conversions, } } @@ -4492,7 +4489,6 @@ pub mod args { .def() .help("The token's address whose balance to query."), ) - .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg( NO_CONVERSIONS.def().help( "Whether not to automatically perform conversions.", diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 0606efd06e..27073fe2d1 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -30,7 +30,7 @@ use namada::core::ledger::pgf::parameters::PgfParameters; use namada::core::ledger::pgf::storage::steward::StewardDetail; use namada::ledger::events::Event; use namada::ledger::ibc::storage::{ - ibc_denom_key, ibc_denom_key_prefix, ibc_token, is_ibc_denom_key, + ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, }; use 
namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{CommissionPair, PosParams, Slash}; @@ -50,6 +50,7 @@ use namada::sdk::wallet::{AddressVpType, Wallet}; use namada::types::address::{masp, Address, InternalAddress}; use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; +use namada::types::ibc::split_ibc_denom; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; @@ -217,7 +218,8 @@ pub async fn query_transfers< for (account, MaspChange { ref asset, change }) in tfer_delta { if account != masp() { display!(IO, " {}:", account); - let token_alias = wallet.lookup_alias(asset); + let token_alias = + lookup_token_alias(client, wallet, asset, &account).await; let sign = match change.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", @@ -245,7 +247,13 @@ pub async fn query_transfers< if fvk_map.contains_key(&account) { display!(IO, " {}:", fvk_map[&account]); for (token_addr, val) in masp_change { - let token_alias = wallet.lookup_alias(&token_addr); + let token_alias = lookup_token_alias( + client, + wallet, + &token_addr, + &masp(), + ) + .await; let sign = match val.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", @@ -351,50 +359,47 @@ pub async fn query_transparent_balance< Address::Internal(namada::types::address::InternalAddress::Multitoken) .to_db_key(), ); - let token = args.token.as_ref().map(|token| { - if let Some(trace_path) = &args.trace_path { - ibc_token(format!("{}/{}", trace_path, token)) - } else { - token.clone() - } - }); - match (token, args.owner) { - (Some(token), Some(owner)) => { - let balance_key = - token::balance_key(&token, &owner.address().unwrap()); - let base_token_alias = - wallet.lookup_alias(&args.token.expect("No token")); - let token_alias = if let Some(trace_path) = args.trace_path { - format!("{}/{}", trace_path, base_token_alias) - } else { - 
base_token_alias - }; - match query_storage_value::(client, &balance_key) + match (args.token, args.owner) { + (Some(base_token), Some(owner)) => { + let owner = owner.address().unwrap(); + let tokens = query_tokens::<_, IO>( + client, + wallet, + Some(&base_token), + Some(&owner), + ) + .await; + for (token_alias, token) in tokens { + let balance_key = token::balance_key(&token, &owner); + match query_storage_value::( + client, + &balance_key, + ) .await - { - Ok(balance) => { - let balance = format_denominated_amount::<_, IO>( - client, &token, balance, - ) - .await; - display_line!(IO, "{}: {}", token_alias, balance); - } - Err(e) => { - display_line!(IO, "Eror in querying: {e}"); - display_line!( - IO, - "No {} balance found for {}", - token_alias, - owner - ) + { + Ok(balance) => { + let balance = format_denominated_amount::<_, IO>( + client, &token, balance, + ) + .await; + display_line!(IO, "{}: {}", token_alias, balance); + } + Err(e) => { + display_line!(IO, "Eror in querying: {e}"); + display_line!( + IO, + "No {} balance found for {}", + token_alias, + owner + ) + } } } } (None, Some(owner)) => { let owner = owner.address().unwrap(); let tokens = - query_tokens::<_, IO>(client, &wallet, Some(&owner)).await; - println!("DEBUG: tokens {:?}", tokens); + query_tokens::<_, IO>(client, wallet, None, Some(&owner)).await; for (token_alias, token) in tokens { let balance = get_token_balance(client, &token, &owner).await; if !balance.is_zero() { @@ -406,20 +411,26 @@ pub async fn query_transparent_balance< } } } - (Some(token), None) => { - let prefix = token::balance_prefix(&token); - let balances = - query_storage_prefix::(client, &prefix) + (Some(base_token), None) => { + let tokens = + query_tokens::<_, IO>(client, wallet, Some(&base_token), None) .await; - if let Some(balances) = balances { - print_balances::<_, IO>( - client, - wallet, - balances, - Some(&token), - None, + for (_, token) in tokens { + let prefix = token::balance_prefix(&token); + let 
balances = query_storage_prefix::( + client, &prefix, ) .await; + if let Some(balances) = balances { + print_balances::<_, IO>( + client, + wallet, + balances, + Some(&token), + None, + ) + .await; + } } } (None, None) => { @@ -434,14 +445,14 @@ pub async fn query_transparent_balance< } } -async fn get_token_alias( +async fn lookup_token_alias( client: &C, wallet: &Wallet, token: &Address, owner: &Address, ) -> String { if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { - let ibc_denom_key = ibc_denom_key(owner, trace_hash); + let ibc_denom_key = ibc_denom_key(owner.to_string(), trace_hash); match query_storage_value::(client, &ibc_denom_key).await { Ok(ibc_denom) => get_ibc_denom_alias(wallet, ibc_denom), Err(_) => token.to_string(), @@ -451,24 +462,51 @@ async fn get_token_alias( } } +/// Returns pairs of token alias and token address async fn query_tokens( client: &C, wallet: &Wallet, + base_token: Option<&Address>, owner: Option<&Address>, ) -> BTreeMap { // Base tokens - let mut tokens = wallet.tokens_with_aliases(); - - let prefix = ibc_denom_key_prefix(owner); - let ibc_denoms = - query_storage_prefix::(client, &prefix).await; - if let Some(ibc_denoms) = ibc_denoms { - for (key, ibc_denom) in ibc_denoms { - if let Some((_, hash)) = is_ibc_denom_key(&key) { - let ibc_denom_alias = get_ibc_denom_alias(wallet, ibc_denom); - let ibc_token = - Address::Internal(InternalAddress::IbcToken(hash)); - tokens.insert(ibc_denom_alias, ibc_token); + let mut tokens = match base_token { + Some(base_token) => { + let mut map = BTreeMap::new(); + map.insert(wallet.lookup_alias(base_token), base_token.clone()); + map + } + None => wallet.tokens_with_aliases(), + }; + + let prefixes = match (base_token, owner) { + (Some(base_token), Some(owner)) => vec![ + ibc_denom_key_prefix(Some(base_token.to_string())), + ibc_denom_key_prefix(Some(owner.to_string())), + ], + (Some(base_token), None) => { + vec![ibc_denom_key_prefix(Some(base_token.to_string()))] + } + 
(None, Some(_)) => { + // Check all IBC denoms because the owner might not know IBC token + // transfers in the same chain + vec![ibc_denom_key_prefix(None)] + } + (None, None) => vec![ibc_denom_key_prefix(None)], + }; + + for prefix in prefixes { + let ibc_denoms = + query_storage_prefix::(client, &prefix).await; + if let Some(ibc_denoms) = ibc_denoms { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, hash)) = is_ibc_denom_key(&key) { + let ibc_denom_alias = + get_ibc_denom_alias(wallet, ibc_denom); + let ibc_token = + Address::Internal(InternalAddress::IbcToken(hash)); + tokens.insert(ibc_denom_alias, ibc_token); + } } } } @@ -479,19 +517,19 @@ fn get_ibc_denom_alias( wallet: &Wallet, ibc_denom: impl AsRef, ) -> String { - let (trace_path, base_denom) = ibc_denom - .as_ref() - .rsplit_once('/') - .unwrap_or(("", ibc_denom.as_ref())); - let token_alias = match Address::decode(&base_denom) { - Ok(token) => wallet.lookup_alias(&token), - Err(_) => base_denom.to_string(), - }; - if trace_path.is_empty() { - token_alias - } else { - format!("{}/{}", trace_path, token_alias) - } + split_ibc_denom(&ibc_denom) + .map(|(trace_path, base_token)| { + let base_token_alias = match Address::decode(&base_token) { + Ok(base_token) => wallet.lookup_alias(&base_token), + Err(_) => base_token, + }; + if trace_path.is_empty() { + base_token_alias + } else { + format!("{}/{}", trace_path, base_token_alias) + } + }) + .unwrap_or(ibc_denom.as_ref().to_string()) } /// Query the token pinned balance(s) @@ -505,8 +543,6 @@ pub async fn query_pinned_balance< shielded: &mut ShieldedContext, args: args::QueryBalance, ) { - // Map addresses to token names - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); let owners = if let Some(pa) = args.owner.and_then(|x| x.payment_address()) { vec![pa] @@ -578,39 +614,46 @@ pub async fn query_pinned_balance< (Err(other), _) => { display_line!(IO, "Error in Querying Pinned balance {}", other) } - (Ok((balance, epoch)), 
Some(token)) => { - let token_alias = wallet.lookup_alias(token); - - let total_balance = balance - .get(&(epoch, token.clone())) - .cloned() - .unwrap_or_default(); + (Ok((balance, epoch)), Some(base_token)) => { + let tokens = query_tokens::<_, IO>( + client, + wallet, + Some(base_token), + None, + ) + .await; + for (token_alias, token) in &tokens { + let total_balance = balance + .get(&(epoch, token.clone())) + .cloned() + .unwrap_or_default(); - if total_balance.is_zero() { - display_line!( - IO, - "Payment address {} was consumed during epoch {}. \ - Received no shielded {}", - owner, - epoch, - token_alias - ); - } else { - let formatted = format_denominated_amount::<_, IO>( - client, - token, - total_balance.into(), - ) - .await; - display_line!( - IO, - "Payment address {} was consumed during epoch {}. \ - Received {} {}", - owner, - epoch, - formatted, - token_alias, - ); + if total_balance.is_zero() { + display_line!( + IO, + "Payment address {} was consumed during epoch {}. \ + Received no shielded {}", + owner, + epoch, + token_alias + ); + } else { + let formatted = format_denominated_amount::<_, IO>( + client, + token, + total_balance.into(), + ) + .await; + display_line!( + IO, + "Payment address {} was consumed during epoch {}. 
\ + Received {} {}", + owner, + epoch, + formatted, + token_alias, + ); + } } } (Ok((balance, epoch)), None) => { @@ -636,10 +679,9 @@ pub async fn query_pinned_balance< (*value).into(), ) .await; - let token_alias = tokens - .get(token_addr) - .map(|a| a.to_string()) - .unwrap_or_else(|| token_addr.to_string()); + let token_alias = + lookup_token_alias(client, wallet, token_addr, &masp()) + .await; display_line!(IO, " {}: {}", token_alias, formatted,); } if !found_any { @@ -684,7 +726,7 @@ async fn print_balances( ), None => continue, }; - let token_alias = get_token_alias(client, wallet, &t, &o).await; + let token_alias = lookup_token_alias(client, wallet, &t, &o).await; // Get the token and the balance let (t, s) = match (token, target) { // the given token and the given target are the same as the @@ -821,56 +863,61 @@ pub async fn query_shielded_balance< // The epoch is required to identify timestamped tokens let epoch = query_and_print_epoch::<_, IO>(client).await; // Map addresses to token names - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); match (args.token, owner.is_some()) { // Here the user wants to know the balance for a specific token - (Some(token), true) => { - // Query the multi-asset balance at the given spending key - let viewing_key = - ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; - let balance: MaspAmount = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) - .await - .unwrap() - .expect("context should contain viewing key") - } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) - .await - .unwrap() - .expect("context should contain viewing key") - }; - - let token_alias = - get_token_alias(client, wallet, &token, &masp()).await; + (Some(base_token), true) => { + let tokens = query_tokens::<_, IO>( + client, + wallet, + Some(&base_token), + Some(&masp()), + ) + .await; + for (token_alias, token) in tokens { + // Query the multi-asset 
balance at the given spending key + let viewing_key = + ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; + let balance: MaspAmount = if no_conversions { + shielded + .compute_shielded_balance(client, &viewing_key) + .await + .unwrap() + .expect("context should contain viewing key") + } else { + shielded + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) + .await + .unwrap() + .expect("context should contain viewing key") + }; - let total_balance = balance - .get(&(epoch, token.clone())) - .cloned() - .unwrap_or_default(); - if total_balance.is_zero() { - display_line!( - IO, - "No shielded {} balance found for given key", - token_alias - ); - } else { - display_line!( - IO, - "{}: {}", - token_alias, - format_denominated_amount::<_, IO>( - client, - &token, - token::Amount::from(total_balance) - ) - .await - ); + let total_balance = balance + .get(&(epoch, token.clone())) + .cloned() + .unwrap_or_default(); + if total_balance.is_zero() { + display_line!( + IO, + "No shielded {} balance found for given key", + token_alias + ); + } else { + display_line!( + IO, + "{}: {}", + token_alias, + format_denominated_amount::<_, IO>( + client, + &token, + token::Amount::from(total_balance) + ) + .await + ); + } } } // Here the user wants to know the balance of all tokens across users @@ -927,10 +974,8 @@ pub async fn query_shielded_balance< } for ((fvk, token), token_balance) in balance_map { // Only assets with the current timestamp count - let alias = tokens - .get(&token) - .map(|a| a.to_string()) - .unwrap_or_else(|| token.to_string()); + let alias = + lookup_token_alias(client, wallet, &token, &masp()).await; display_line!(IO, "Shielded Token {}:", alias); let formatted = format_denominated_amount::<_, IO>( client, @@ -943,61 +988,63 @@ pub async fn query_shielded_balance< } // Here the user wants to know the balance for a specific token across // users - (Some(token), false) => { - // Compute the unique asset identifier from the token 
address - let token = token; - let _asset_type = AssetType::new( - (token.clone(), epoch.0) - .try_to_vec() - .expect("token addresses should serialize") - .as_ref(), - ) - .unwrap(); - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias); - let mut found_any = false; - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias,); - for fvk in viewing_keys { - // Query the multi-asset balance at the given spending key - let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; - let balance = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) - .await - .unwrap() - .expect("context should contain viewing key") - } else { - shielded - .compute_exchanged_balance::<_, IO>( + (Some(base_token), false) => { + let tokens = + query_tokens::<_, IO>(client, wallet, Some(&base_token), None) + .await; + for (token_alias, token) in tokens { + // Compute the unique asset identifier from the token address + let token = token; + let _asset_type = AssetType::new( + (token.clone(), epoch.0) + .try_to_vec() + .expect("token addresses should serialize") + .as_ref(), + ) + .unwrap(); + let mut found_any = false; + display_line!(IO, "Shielded Token {}:", token_alias); + for fvk in &viewing_keys { + // Query the multi-asset balance at the given spending key + let viewing_key = ExtendedFullViewingKey::from(*fvk).fvk.vk; + let balance = if no_conversions { + shielded + .compute_shielded_balance(client, &viewing_key) + .await + .unwrap() + .expect("context should contain viewing key") + } else { + shielded + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) + .await + .unwrap() + .expect("context should contain viewing key") + }; + + for ((_, address), val) in balance.iter() { + if !val.is_zero() { + found_any = true; + } + let formatted = format_denominated_amount::<_, IO>( client, - &viewing_key, - epoch, + address, + (*val).into(), ) - 
.await - .unwrap() - .expect("context should contain viewing key") - }; - - for ((_, address), val) in balance.iter() { - if !val.is_zero() { - found_any = true; + .await; + display_line!(IO, " {}, owned by {}", formatted, fvk); } - let formatted = format_denominated_amount::<_, IO>( - client, - address, - (*val).into(), - ) - .await; - display_line!(IO, " {}, owned by {}", formatted, fvk); } - } - if !found_any { - display_line!( - IO, - "No shielded {} balance found for any wallet key", - token_alias, - ); + if !found_any { + display_line!( + IO, + "No shielded {} balance found for any wallet key", + token_alias, + ); + } } } // Here the user wants to know all possible token balances for a key @@ -1053,7 +1100,7 @@ pub async fn print_decoded_balance< display_line!( IO, "{} : {}", - wallet.lookup_alias(token_addr), + lookup_token_alias(client, wallet, token_addr, &masp()).await, format_denominated_amount::<_, IO>( client, token_addr, diff --git a/core/src/ledger/ibc/context/common.rs b/core/src/ledger/ibc/context/common.rs index b5f0326767..c47b78a38c 100644 --- a/core/src/ledger/ibc/context/common.rs +++ b/core/src/ledger/ibc/context/common.rs @@ -358,14 +358,14 @@ pub trait IbcCommonContext: IbcStorageContext { }) } - /// Write the IBC denom + /// Write the IBC denom. The given address could be a non-Namada token. 
fn store_ibc_denom( &mut self, - receiver: &Address, + addr: impl AsRef, trace_hash: impl AsRef, denom: impl AsRef, ) -> Result<(), ContextError> { - let key = storage::ibc_denom_key(receiver, trace_hash.as_ref()); + let key = storage::ibc_denom_key(addr, trace_hash.as_ref()); let has_key = self.has_key(&key).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 4cc86277f8..4900312a7f 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -28,7 +28,9 @@ use crate::ibc::core::{execute, validate, MsgEnvelope, RouterError}; use crate::ibc_proto::google::protobuf::Any; use crate::types::address::Address; use crate::types::chain::ChainId; -use crate::types::ibc::{EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET}; +use crate::types::ibc::{ + split_ibc_denom, EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, +}; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -147,13 +149,28 @@ where // denomination is also set for the minting. 
self.ctx .borrow_mut() - .store_ibc_denom(&receiver, trace_hash, &ibc_denom) + .store_ibc_denom( + &receiver.to_string(), + &trace_hash, + &ibc_denom, + ) .map_err(|e| { Error::Denom(format!( "Writing the IBC denom failed: {}", e )) })?; + if let Some((_, base_token)) = split_ibc_denom(&ibc_denom) { + self.ctx + .borrow_mut() + .store_ibc_denom(base_token, trace_hash, &ibc_denom) + .map_err(|e| { + Error::Denom(format!( + "Writing the IBC denom failed: {}", + e + )) + })?; + } let token = storage::ibc_token(ibc_denom); self.ctx.borrow_mut().store_token_denom(&token).map_err( |e| { diff --git a/core/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs index 349c4b6ef3..717991b9f1 100644 --- a/core/src/ledger/ibc/storage.rs +++ b/core/src/ledger/ibc/storage.rs @@ -367,24 +367,30 @@ pub fn port_id(key: &Key) -> Result { } } -/// The storage key prefix to get the denom name with the hashed IBC denom -pub fn ibc_denom_key_prefix(owner: Option<&Address>) -> Key { +/// The storage key prefix to get the denom name with the hashed IBC denom. The +/// address is given as string because the given address could be non-Namada +/// token. +pub fn ibc_denom_key_prefix(addr: Option) -> Key { let prefix = Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) .push(&DENOM.to_string().to_db_key()) .expect("Cannot obtain a storage key"); - if let Some(owner) = owner { + if let Some(addr) = addr { prefix - .push(&owner.to_db_key()) + .push(&addr.to_db_key()) .expect("Cannot obtain a storage key") } else { prefix } } -/// The storage key to get the denom name with the hashed IBC denom -pub fn ibc_denom_key(owner: &Address, token_hash: impl AsRef) -> Key { - ibc_denom_key_prefix(Some(owner)) +/// The storage key to get the denom name with the hashed IBC denom. The address +/// is given as string because the given address could be non-Namada token. 
+pub fn ibc_denom_key( + addr: impl AsRef, + token_hash: impl AsRef, +) -> Key { + ibc_denom_key_prefix(Some(addr.as_ref().to_string())) .push(&token_hash.as_ref().to_string().to_db_key()) .expect("Cannot obtain a storage key") } @@ -409,12 +415,12 @@ pub fn is_ibc_key(key: &Key) -> bool { } /// Returns the owner and the token hash if the given key is the denom key -pub fn is_ibc_denom_key(key: &Key) -> Option<(Address, String)> { +pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { match &key.segments[..] { [ DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(owner), + DbKeySeg::StringSeg(owner), DbKeySeg::StringSeg(hash), ] => { if addr == &Address::Internal(InternalAddress::Ibc) diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 32a93218eb..4ee504fb4a 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -53,10 +53,12 @@ impl std::fmt::Display for IbcEvent { #[cfg(any(feature = "abciplus", feature = "abcipp"))] mod ibc_rs_conversion { use std::collections::HashMap; + use std::str::FromStr; use thiserror::Error; use super::IbcEvent; + use crate::ibc::applications::transfer::{PrefixedDenom, TracePath}; use crate::ibc::core::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; @@ -89,6 +91,19 @@ mod ibc_rs_conversion { }) } } + + /// Returns the trace path and the token string if the denom is an IBC + /// denom. 
+ pub fn split_ibc_denom( + denom: impl AsRef, + ) -> Option<(TracePath, String)> { + let prefixed_denom = PrefixedDenom::from_str(denom.as_ref()).ok()?; + // The base token isn't decoded because it could be non Namada token + Some(( + prefixed_denom.trace_path, + prefixed_denom.base_denom.to_string(), + )) + } } #[cfg(any(feature = "abciplus", feature = "abcipp"))] diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index c50ee89600..5e48a8613f 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -2191,7 +2191,14 @@ mod tests { packet.chan_id_on_b.clone(), )); let trace_hash = calc_hash(coin.denom.to_string()); - let denom_key = ibc_denom_key(&receiver, &trace_hash); + let denom_key = ibc_denom_key(receiver.to_string(), &trace_hash); + let bytes = coin.denom.to_string().try_to_vec().unwrap(); + wl_storage + .write_log + .write(&denom_key, bytes) + .expect("write failed"); + keys_changed.insert(denom_key); + let denom_key = ibc_denom_key(nam().to_string(), &trace_hash); let bytes = coin.denom.to_string().try_to_vec().unwrap(); wl_storage .write_log diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 3c31b17ba7..af65abcad2 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -396,8 +396,6 @@ pub struct QueryBalance { pub owner: Option, /// Address of a token pub token: Option, - /// Transferred token's trace path - pub trace_path: Option, /// Whether not to convert balances pub no_conversions: bool, } diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index a539efaae8..7e717b4a54 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -1035,7 +1035,7 @@ fn transfer( if let Some(trace_path) = trace_path { tx_args.push("--trace-path"); - tx_args.push(&trace_path.clone()); + tx_args.push(trace_path.clone()); } let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); @@ -1240,15 +1240,7 @@ fn 
check_balances( let trace_path = format!("{}/{}", &dest_port_id, &dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", - "--owner", - BERTHA, - "--token", - NAM, - "--trace-path", - &trace_path, - "--node", - &rpc_b, + "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; let expected = format!("{}: 100000", format!("{}/nam", trace_path)); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; @@ -1268,34 +1260,16 @@ fn check_balances_after_non_ibc( // Check the source let rpc = get_actor_rpc(test, &Who::Validator(0)); - let query_args = vec![ - "balance", - "--owner", - BERTHA, - "--token", - NAM, - "--trace-path", - &trace_path, - "--node", - &rpc, - ]; + let query_args = + vec!["balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc]; let expected = format!("{}: 50000", format!("{}/nam", trace_path)); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); // Check the traget - let query_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--trace-path", - &trace_path, - "--node", - &rpc, - ]; + let query_args = + vec!["balance", "--owner", ALBERT, "--token", NAM, "--node", &rpc]; let expected = format!("{}: 50000", format!("{}/nam", trace_path)); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; @@ -1330,15 +1304,7 @@ fn check_balances_after_back( let trace_path = format!("{}/{}", dest_port_id, dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", - "--owner", - BERTHA, - "--token", - NAM, - "--trace-path", - &trace_path, - "--node", - &rpc_b, + "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; let expected = format!("{}: 0", format!("{}/nam", trace_path)); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; From 1b79599ea4f281f22899cbcca7fcc30693ebca77 Mon Sep 17 
00:00:00 2001 From: yito88 Date: Thu, 28 Sep 2023 11:02:41 +0200 Subject: [PATCH 009/161] add changelog --- .../improvements/1946-ibc-balance-query.md | 2 + apps/src/lib/client/rpc.rs | 175 +++++++++--------- tests/src/e2e/ibc_tests.rs | 8 +- 3 files changed, 94 insertions(+), 91 deletions(-) create mode 100644 .changelog/unreleased/improvements/1946-ibc-balance-query.md diff --git a/.changelog/unreleased/improvements/1946-ibc-balance-query.md b/.changelog/unreleased/improvements/1946-ibc-balance-query.md new file mode 100644 index 0000000000..1f1093caa0 --- /dev/null +++ b/.changelog/unreleased/improvements/1946-ibc-balance-query.md @@ -0,0 +1,2 @@ +- Query also IBC token balances + ([\#1946](https://github.com/anoma/namada/issues/1946)) \ No newline at end of file diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 27073fe2d1..d3363d17a7 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -445,93 +445,6 @@ pub async fn query_transparent_balance< } } -async fn lookup_token_alias( - client: &C, - wallet: &Wallet, - token: &Address, - owner: &Address, -) -> String { - if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { - let ibc_denom_key = ibc_denom_key(owner.to_string(), trace_hash); - match query_storage_value::(client, &ibc_denom_key).await { - Ok(ibc_denom) => get_ibc_denom_alias(wallet, ibc_denom), - Err(_) => token.to_string(), - } - } else { - wallet.lookup_alias(token) - } -} - -/// Returns pairs of token alias and token address -async fn query_tokens( - client: &C, - wallet: &Wallet, - base_token: Option<&Address>, - owner: Option<&Address>, -) -> BTreeMap { - // Base tokens - let mut tokens = match base_token { - Some(base_token) => { - let mut map = BTreeMap::new(); - map.insert(wallet.lookup_alias(base_token), base_token.clone()); - map - } - None => wallet.tokens_with_aliases(), - }; - - let prefixes = match (base_token, owner) { - (Some(base_token), Some(owner)) => vec![ - 
ibc_denom_key_prefix(Some(base_token.to_string())), - ibc_denom_key_prefix(Some(owner.to_string())), - ], - (Some(base_token), None) => { - vec![ibc_denom_key_prefix(Some(base_token.to_string()))] - } - (None, Some(_)) => { - // Check all IBC denoms because the owner might not know IBC token - // transfers in the same chain - vec![ibc_denom_key_prefix(None)] - } - (None, None) => vec![ibc_denom_key_prefix(None)], - }; - - for prefix in prefixes { - let ibc_denoms = - query_storage_prefix::(client, &prefix).await; - if let Some(ibc_denoms) = ibc_denoms { - for (key, ibc_denom) in ibc_denoms { - if let Some((_, hash)) = is_ibc_denom_key(&key) { - let ibc_denom_alias = - get_ibc_denom_alias(wallet, ibc_denom); - let ibc_token = - Address::Internal(InternalAddress::IbcToken(hash)); - tokens.insert(ibc_denom_alias, ibc_token); - } - } - } - } - tokens -} - -fn get_ibc_denom_alias( - wallet: &Wallet, - ibc_denom: impl AsRef, -) -> String { - split_ibc_denom(&ibc_denom) - .map(|(trace_path, base_token)| { - let base_token_alias = match Address::decode(&base_token) { - Ok(base_token) => wallet.lookup_alias(&base_token), - Err(_) => base_token, - }; - if trace_path.is_empty() { - base_token_alias - } else { - format!("{}/{}", trace_path, base_token_alias) - } - }) - .unwrap_or(ibc_denom.as_ref().to_string()) -} - /// Query the token pinned balance(s) pub async fn query_pinned_balance< C: namada::ledger::queries::Client + Sync, @@ -776,6 +689,94 @@ async fn print_balances( } } +async fn lookup_token_alias( + client: &C, + wallet: &Wallet, + token: &Address, + owner: &Address, +) -> String { + if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { + let ibc_denom_key = ibc_denom_key(owner.to_string(), trace_hash); + match query_storage_value::(client, &ibc_denom_key).await { + Ok(ibc_denom) => get_ibc_denom_alias(wallet, ibc_denom), + Err(_) => token.to_string(), + } + } else { + wallet.lookup_alias(token) + } +} + +/// Returns pairs of token alias and 
token address +async fn query_tokens( + client: &C, + wallet: &Wallet, + base_token: Option<&Address>, + owner: Option<&Address>, +) -> BTreeMap { + // Base tokens + let mut tokens = match base_token { + Some(base_token) => { + let mut map = BTreeMap::new(); + map.insert(wallet.lookup_alias(base_token), base_token.clone()); + map + } + None => wallet.tokens_with_aliases(), + }; + + let prefixes = match (base_token, owner) { + (Some(base_token), Some(owner)) => vec![ + ibc_denom_key_prefix(Some(base_token.to_string())), + ibc_denom_key_prefix(Some(owner.to_string())), + ], + (Some(base_token), None) => { + vec![ibc_denom_key_prefix(Some(base_token.to_string()))] + } + (None, Some(_)) => { + // Check all IBC denoms because the owner might not know IBC token + // transfers in the same chain + vec![ibc_denom_key_prefix(None)] + } + (None, None) => vec![ibc_denom_key_prefix(None)], + }; + + for prefix in prefixes { + let ibc_denoms = + query_storage_prefix::(client, &prefix).await; + if let Some(ibc_denoms) = ibc_denoms { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, hash)) = is_ibc_denom_key(&key) { + let ibc_denom_alias = + get_ibc_denom_alias(wallet, ibc_denom); + let ibc_token = + Address::Internal(InternalAddress::IbcToken(hash)); + tokens.insert(ibc_denom_alias, ibc_token); + } + } + } + } + tokens +} + +fn get_ibc_denom_alias( + wallet: &Wallet, + ibc_denom: impl AsRef, +) -> String { + split_ibc_denom(&ibc_denom) + .map(|(trace_path, base_token)| { + let base_token_alias = match Address::decode(&base_token) { + Ok(base_token) => wallet.lookup_alias(&base_token), + Err(_) => base_token, + }; + if trace_path.is_empty() { + base_token_alias + } else { + format!("{}/{}", trace_path, base_token_alias) + } + }) + .unwrap_or(ibc_denom.as_ref().to_string()) +} + + /// Query Proposals pub async fn query_proposal< C: namada::ledger::queries::Client + Sync, diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 7e717b4a54..27261a233d 100644 
--- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -1242,7 +1242,7 @@ fn check_balances( let query_args = vec![ "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; - let expected = format!("{}: 100000", format!("{}/nam", trace_path)); + let expected = format!("{}/nam: 100000", trace_path); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1262,7 +1262,7 @@ fn check_balances_after_non_ibc( let rpc = get_actor_rpc(test, &Who::Validator(0)); let query_args = vec!["balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc]; - let expected = format!("{}: 50000", format!("{}/nam", trace_path)); + let expected = format!("{}/nam: 50000", trace_path); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1270,7 +1270,7 @@ fn check_balances_after_non_ibc( // Check the traget let query_args = vec!["balance", "--owner", ALBERT, "--token", NAM, "--node", &rpc]; - let expected = format!("{}: 50000", format!("{}/nam", trace_path)); + let expected = format!("{}/nam: 50000", trace_path); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1306,7 +1306,7 @@ fn check_balances_after_back( let query_args = vec![ "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; - let expected = format!("{}: 0", format!("{}/nam", trace_path)); + let expected = format!("{}/nam: 0", trace_path); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); From 0613ac2c3a1bcc749cb4f1112e1b923220f76c5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 11:41:55 +0100 Subject: [PATCH 010/161] benches: move bench lib code to apps crate and feature guard it --- apps/Cargo.toml | 3 +- benches/lib.rs => apps/src/lib/bench_utils.rs | 36 
+++++++++---------- apps/src/lib/config/genesis.rs | 2 +- apps/src/lib/mod.rs | 2 ++ benches/Cargo.toml | 20 +++++------ benches/native_vps.rs | 4 +-- benches/process_wrapper.rs | 2 +- benches/txs.rs | 4 +-- benches/vps.rs | 4 +-- 9 files changed, 38 insertions(+), 39 deletions(-) rename benches/lib.rs => apps/src/lib/bench_utils.rs (96%) diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 1d33f55df7..06a69e35ea 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -57,7 +57,7 @@ mainnet = [ dev = ["namada/dev"] std = ["ed25519-consensus/std", "rand/std", "rand_core/std", "namada/std"] # for integration tests and test utilies -testing = ["dev"] +testing = ["dev", "namada_test_utils"] abciplus = [ "namada/abciplus", @@ -67,6 +67,7 @@ abciplus = [ [dependencies] namada = {path = "../shared", features = ["ferveo-tpke", "masp-tx-gen", "multicore", "http-client"]} +namada_test_utils = {path = "../test_utils", optional = true} ark-serialize.workspace = true ark-std.workspace = true arse-merkle-tree = { workspace = true, features = ["blake2b"] } diff --git a/benches/lib.rs b/apps/src/lib/bench_utils.rs similarity index 96% rename from benches/lib.rs rename to apps/src/lib/bench_utils.rs index 47645abdf4..f88b92d3c0 100644 --- a/benches/lib.rs +++ b/apps/src/lib/bench_utils.rs @@ -91,20 +91,21 @@ use namada::types::transaction::governance::InitProposalData; use namada::types::transaction::pos::Bond; use namada::types::transaction::GasLimit; use namada::vm::wasm::run; -use namada_apps::cli::args::{Tx as TxArgs, TxTransfer}; -use namada_apps::cli::context::FromContext; -use namada_apps::cli::Context; -use namada_apps::config::TendermintMode; -use namada_apps::facade::tendermint_proto::abci::RequestInitChain; -use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; -use namada_apps::node::ledger::shell::Shell; -use namada_apps::wallet::{defaults, CliWalletUtils}; -use namada_apps::{config, wasm_loader}; use namada_test_utils::tx_data::TxWriteData; use 
rand_core::OsRng; use sha2::{Digest, Sha256}; use tempfile::TempDir; +use crate::cli::args::{Tx as TxArgs, TxTransfer}; +use crate::cli::context::FromContext; +use crate::cli::Context; +use crate::config::TendermintMode; +use crate::facade::tendermint_proto::abci::RequestInitChain; +use crate::facade::tendermint_proto::google::protobuf::Timestamp; +use crate::node::ledger::shell::Shell; +use crate::wallet::{defaults, CliWalletUtils}; +use crate::{config, wasm_loader}; + pub const WASM_DIR: &str = "../wasm"; pub const TX_BOND_WASM: &str = "tx_bond.wasm"; pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; @@ -681,13 +682,12 @@ impl Default for BenchShieldedCtx { fn default() -> Self { let mut shell = BenchShell::default(); - let mut ctx = - Context::new::(namada_apps::cli::args::Global { - chain_id: None, - base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), - wasm_dir: Some(WASM_DIR.into()), - }) - .unwrap(); + let mut ctx = Context::new::(crate::cli::args::Global { + chain_id: None, + base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), + wasm_dir: Some(WASM_DIR.into()), + }) + .unwrap(); // Generate spending key for Albert and Bertha ctx.wallet.gen_spending_key( @@ -700,7 +700,7 @@ impl Default for BenchShieldedCtx { None, true, ); - namada_apps::wallet::save(&ctx.wallet).unwrap(); + crate::wallet::save(&ctx.wallet).unwrap(); // Generate payment addresses for both Albert and Bertha for (alias, viewing_alias) in [ @@ -732,7 +732,7 @@ impl Default for BenchShieldedCtx { .unwrap(); } - namada_apps::wallet::save(&ctx.wallet).unwrap(); + crate::wallet::save(&ctx.wallet).unwrap(); namada::ledger::storage::update_allowed_conversions( &mut shell.wl_storage, ) diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..b70637e0bc 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -15,7 +15,6 @@ use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use 
namada::types::time::{DateTimeUtc, DurationSecs}; use namada::types::token::Denomination; -use namada::types::uint::Uint; use namada::types::{storage, token}; /// Genesis configuration file format @@ -908,6 +907,7 @@ pub fn genesis(num_validators: u64) -> Genesis { }; use namada::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada::types::ethereum_events::EthAddress; + use namada::types::uint::Uint; use crate::wallet; diff --git a/apps/src/lib/mod.rs b/apps/src/lib/mod.rs index 7df31ea2ea..b2991870ef 100644 --- a/apps/src/lib/mod.rs +++ b/apps/src/lib/mod.rs @@ -5,6 +5,8 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] +#[cfg(feature = "testing")] +pub mod bench_utils; pub mod cli; pub mod client; pub mod config; diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 91a5d45333..d81dec84c2 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -12,10 +12,6 @@ readme.workspace = true repository.workspace = true version.workspace = true -[lib] -name = "namada_benches" -path = "lib.rs" - [[bench]] name = "whitelisted_txs" harness = false @@ -42,21 +38,21 @@ harness = false path = "host_env.rs" [dependencies] + +[dev-dependencies] +namada = { path = "../shared", features = ["testing"] } +namada_apps = { path = "../apps", features = ["testing"] } +namada_test_utils = { path = "../test_utils" } async-trait.workspace = true borsh.workspace = true +criterion = { version = "0.5", features = ["html_reports"] } ferveo-common.workspace = true masp_primitives.workspace = true masp_proofs.workspace = true -namada = { path = "../shared", features = ["testing"] } -namada_apps = { path = "../apps", features = ["testing"] } -namada_test_utils = { path = "../test_utils" } prost.workspace = true -rand.workspace = true rand_core.workspace = true +rand.workspace = true sha2.workspace = true -tokio.workspace = true tempfile.workspace = true +tokio.workspace = true tracing-subscriber = { workspace = true, features = ["std"]} - 
-[dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } diff --git a/benches/native_vps.rs b/benches/native_vps.rs index 77373080c4..2c317455dd 100644 --- a/benches/native_vps.rs +++ b/benches/native_vps.rs @@ -32,12 +32,12 @@ use namada::types::storage::{Epoch, TxIndex}; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_foreign_key_tx, generate_ibc_transfer_tx, generate_ibc_tx, generate_tx, BenchShell, TX_IBC_WASM, TX_INIT_PROPOSAL_WASM, TX_TRANSFER_WASM, TX_VOTE_PROPOSAL_WASM, }; +use namada_apps::wallet::defaults; fn replay_protection(c: &mut Criterion) { // Write a random key under the replay protection subspace diff --git a/benches/process_wrapper.rs b/benches/process_wrapper.rs index ce466b1058..d6fbe9b483 100644 --- a/benches/process_wrapper.rs +++ b/benches/process_wrapper.rs @@ -7,9 +7,9 @@ use namada::types::key::RefTo; use namada::types::storage::BlockHeight; use namada::types::time::DateTimeUtc; use namada::types::transaction::{Fee, WrapperTx}; +use namada_apps::bench_utils::{generate_tx, BenchShell, TX_TRANSFER_WASM}; use namada_apps::node::ledger::shell::process_proposal::ValidationMeta; use namada_apps::wallet::defaults; -use namada_benches::{generate_tx, BenchShell, TX_TRANSFER_WASM}; fn process_tx(c: &mut Criterion) { let mut shell = BenchShell::default(); diff --git a/benches/txs.rs b/benches/txs.rs index a1373c7931..65f702bc43 100644 --- a/benches/txs.rs +++ b/benches/txs.rs @@ -22,14 +22,14 @@ use namada::types::transaction::governance::{ }; use namada::types::transaction::pos::{Bond, CommissionChange, Withdraw}; use namada::types::transaction::EllipticCurve; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_ibc_transfer_tx, generate_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, 
TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_INIT_PROPOSAL_WASM, TX_REVEAL_PK_WASM, TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, VP_VALIDATOR_WASM, }; +use namada_apps::wallet::defaults; use rand::rngs::StdRng; use rand::SeedableRng; use sha2::Digest; diff --git a/benches/vps.rs b/benches/vps.rs index 6efaf78e4c..5e5d66d8dd 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -19,14 +19,14 @@ use namada::types::storage::{Key, TxIndex}; use namada::types::transaction::governance::VoteProposalData; use namada::types::transaction::pos::{Bond, CommissionChange}; use namada::vm::wasm::run; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_foreign_key_tx, generate_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_REVEAL_PK_WASM, TX_TRANSFER_WASM, TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, VP_VALIDATOR_WASM, }; +use namada_apps::wallet::defaults; use sha2::Digest; const VP_USER_WASM: &str = "vp_user.wasm"; From 4dc0304d2f38ac49c0779000f61b77baf6eecece Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 11:47:34 +0100 Subject: [PATCH 011/161] benches: update docs --- apps/src/lib/bench_utils.rs | 17 ++--------------- benches/README.md | 10 ++++++++++ 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/apps/src/lib/bench_utils.rs b/apps/src/lib/bench_utils.rs index f88b92d3c0..2bffe656ee 100644 --- a/apps/src/lib/bench_utils.rs +++ b/apps/src/lib/bench_utils.rs @@ -1,18 +1,5 @@ -//! Benchmarks module based on criterion. -//! -//! Measurements are taken on the elapsed wall-time. -//! -//! The benchmarks only focus on sucessfull transactions and vps: in case of -//! failure, the bench function shall panic to avoid timing incomplete execution -//! paths. -//! -//! 
In addition, this module also contains benchmarks for -//! [`WrapperTx`][`namada::core::types::transaction::wrapper::WrapperTx`] -//! validation and [`host_env`][`namada::vm::host_env`] exposed functions that -//! define the gas constants of [`gas`][`namada::core::ledger::gas`]. -//! -//! For more realistic results these benchmarks should be run on all the -//! combination of supported OS/architecture. +//! Library code for benchmarks provides a wrapper of the ledger's shell +//! `BenchShell` and helper functions to generate transactions. use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; diff --git a/benches/README.md b/benches/README.md index 02b0d52a91..86978eb6f7 100644 --- a/benches/README.md +++ b/benches/README.md @@ -2,6 +2,16 @@ The benchmarks are built with [criterion.rs](https://bheisler.github.io/criterion.rs/book). +Measurements are taken on the elapsed wall-time. + +The benchmarks only focus on successful transactions and vps: in case of failure, the bench function shall panic to avoid timing incomplete execution paths. + +In addition, this crate also contains benchmarks for `WrapperTx` (`namada::core::types::transaction::wrapper::WrapperTx`) validation and `host_env` (`namada::vm::host_env`) exposed functions that define the gas constants of `gas` (`namada::core::ledger::gas`). + +For more realistic results these benchmarks should be run on all the combinations of supported OS/architecture. + +## Testing & running + To enable tracing logs, run with e.g. `RUST_LOG=debug`. To ensure that the benches can run successfully without performing measurement, you can run `make test-benches` from the workspace run. 
From 5aaa83e6a8d3d520df244c7947d6f188c6c485c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 11:53:14 +0100 Subject: [PATCH 012/161] benches: rm unused deps --- Cargo.lock | 8 -------- benches/Cargo.toml | 8 -------- 2 files changed, 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..92f191c816 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4167,22 +4167,14 @@ dependencies = [ name = "namada_benchmarks" version = "0.23.0" dependencies = [ - "async-trait", "borsh 0.9.4", "criterion", "ferveo-common", - "masp_primitives", - "masp_proofs", "namada", "namada_apps", - "namada_test_utils", - "prost", "rand 0.8.5", "rand_core 0.6.4", "sha2 0.9.9", - "tempfile", - "tokio", - "tracing-subscriber 0.3.17", ] [[package]] diff --git a/benches/Cargo.toml b/benches/Cargo.toml index d81dec84c2..fc4c3485a6 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -42,17 +42,9 @@ path = "host_env.rs" [dev-dependencies] namada = { path = "../shared", features = ["testing"] } namada_apps = { path = "../apps", features = ["testing"] } -namada_test_utils = { path = "../test_utils" } -async-trait.workspace = true borsh.workspace = true criterion = { version = "0.5", features = ["html_reports"] } ferveo-common.workspace = true -masp_primitives.workspace = true -masp_proofs.workspace = true -prost.workspace = true rand_core.workspace = true rand.workspace = true sha2.workspace = true -tempfile.workspace = true -tokio.workspace = true -tracing-subscriber = { workspace = true, features = ["std"]} From bc2e2859ed66796e664f6899d5bcc5608c741c37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 12:16:33 +0100 Subject: [PATCH 013/161] changelog: add #1955 --- .../improvements/1955-avoid-testing-feature-in-workspace.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md diff --git 
a/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md b/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md new file mode 100644 index 0000000000..7ce5c574b2 --- /dev/null +++ b/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md @@ -0,0 +1,3 @@ +- Refactor benchmarks to avoid enabling `"testing"` and `"dev"` features by + default in the workspace. + ([\#1955](https://github.com/anoma/namada/pull/1955)) \ No newline at end of file From f015005aea480d007e32d340d622e91c9e48b2e9 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 28 Sep 2023 12:32:41 +0200 Subject: [PATCH 014/161] Moves signatures verification gas in `Signature` --- benches/host_env.rs | 2 +- core/src/proto/types.rs | 37 +++++++++++++++++++++--------------- core/src/types/key/mod.rs | 2 ++ shared/src/vm/host_env.rs | 2 +- tests/src/vm_host_env/mod.rs | 4 ++-- 5 files changed, 28 insertions(+), 19 deletions(-) diff --git a/benches/host_env.rs b/benches/host_env.rs index 6f385b93bc..8970c34a93 100644 --- a/benches/host_env.rs +++ b/benches/host_env.rs @@ -35,7 +35,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { c.bench_function("tx_section_signature_validation", |b| { b.iter(|| { multisig - .verify_signature(&mut HashSet::new(), &pkim, &None) + .verify_signature(&mut HashSet::new(), &pkim, &None, &mut None) .unwrap() }) }); diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index a6082fbbab..e5203293aa 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -553,6 +553,7 @@ impl Signature { verified_pks: &mut HashSet, public_keys_index_map: &AccountPublicKeysMap, signer: &Option
, + gas_meter: &mut Option<&mut VpGasMeter>, ) -> std::result::Result { // Records whether there are any successful verifications let mut verifications = 0; @@ -564,6 +565,11 @@ impl Signature { if let Some(pk) = public_keys_index_map.get_public_key_from_index(*idx) { + if let Some(meter) = gas_meter { + meter + .consume(VERIFY_TX_SIG_GAS_COST) + .map_err(|_| VerifySigError::OutOfGas)?; + } common::SigScheme::verify_signature( &pk, &self.get_raw_hash(), @@ -584,6 +590,11 @@ impl Signature { if let Some(map_idx) = public_keys_index_map.get_index_from_public_key(pk) { + if let Some(meter) = gas_meter { + meter + .consume(VERIFY_TX_SIG_GAS_COST) + .map_err(|_| VerifySigError::OutOfGas)?; + } common::SigScheme::verify_signature( pk, &self.get_raw_hash(), @@ -1381,7 +1392,7 @@ impl Tx { signer: &Option
, threshold: u8, max_signatures: Option, - mut gas_meter: Option<&mut VpGasMeter>, + gas_meter: &mut Option<&mut VpGasMeter>, ) -> std::result::Result, Error> { let max_signatures = max_signatures.unwrap_or(u8::MAX); // Records the public key indices used in successful signatures @@ -1408,26 +1419,22 @@ impl Tx { } // Finally verify that the signature itself is valid - let prev_verifieds = verified_pks.len(); let amt_verifieds = signatures .verify_signature( &mut verified_pks, &public_keys_index_map, signer, + gas_meter, ) - .map_err(|_| { - Error::InvalidSectionSignature( - "found invalid signature.".to_string(), - ) + .map_err(|e| { + if let VerifySigError::OutOfGas = e { + Error::OutOfGas + } else { + Error::InvalidSectionSignature( + "found invalid signature.".to_string(), + ) + } }); - // Compute the cost of the signature verifications - if let Some(x) = gas_meter.as_mut() { - let amt_verified = usize::from(amt_verifieds.is_err()) - + verified_pks.len() - - prev_verifieds; - x.consume(VERIFY_TX_SIG_GAS_COST * amt_verified as u64) - .map_err(|_| Error::OutOfGas)?; - } // Record the section witnessing these signatures if amt_verifieds? 
> 0 { witnesses.push(signatures); @@ -1458,7 +1465,7 @@ impl Tx { &None, 1, None, - None, + &mut None, ) .map(|x| *x.first().unwrap()) .map_err(|_| Error::InvalidWrapperSignature) diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs index 1287956b13..d39912299f 100644 --- a/core/src/types/key/mod.rs +++ b/core/src/types/key/mod.rs @@ -123,6 +123,8 @@ pub enum VerifySigError { MissingData, #[error("Signature belongs to a different scheme from the public key.")] MismatchedScheme, + #[error("Signature verification went out of gas")] + OutOfGas, } #[allow(missing_docs)] diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..1479ed7d8f 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1848,7 +1848,7 @@ where &Some(signer), threshold, max_signatures, - Some(gas_meter), + &mut Some(gas_meter), ) .is_ok(), ) diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 68ebd76dff..de24784846 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -483,7 +483,7 @@ mod tests { &None, 1, None, - Some(&mut VpGasMeter::new_from_tx_meter( + &mut Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) )) ) @@ -504,7 +504,7 @@ mod tests { &None, 1, None, - Some(&mut VpGasMeter::new_from_tx_meter( + &mut Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) )) ) From 6fba21a6f664fd3c4366b13e20fe1716f5a825e1 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 28 Sep 2023 12:36:20 +0200 Subject: [PATCH 015/161] changelog: add #1954 --- .changelog/unreleased/improvements/1954-gas-in-sig-ver.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1954-gas-in-sig-ver.md diff --git a/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md b/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md new file mode 100644 index 0000000000..27ba173c46 --- /dev/null +++ 
b/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md @@ -0,0 +1,2 @@ +- Increased resolution of gas accounting for signature verification. + ([\#1954](https://github.com/anoma/namada/pull/1954)) \ No newline at end of file From 8416f0bc37b5a9e5778596e300b2f31a36a7463c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 14:19:34 +0100 Subject: [PATCH 016/161] test/eth: init storage with PoS params where needed --- ethereum_bridge/src/protocol/transactions/votes/update.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs index c1173bdf12..928bb4d7f8 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -376,6 +376,7 @@ mod tests { #[test] fn test_apply_duplicate_votes() -> Result<()> { let mut wl_storage = TestWlStorage::default(); + test_utils::init_default_storage(&mut wl_storage); let validator = address::testing::established_address_1(); let already_voted_height = BlockHeight(100); @@ -411,6 +412,7 @@ mod tests { #[test] fn test_calculate_already_seen() -> Result<()> { let mut wl_storage = TestWlStorage::default(); + test_utils::init_default_storage(&mut wl_storage); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { From f5d4544639caad90f3e5e7aebc35ba11c97abd28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 14:26:29 +0100 Subject: [PATCH 017/161] test/PoS/epoched: init PoS params and fix `PastEpochs` type param --- proof_of_stake/src/epoched.rs | 39 +++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 8483a61c62..92b336d960 100644 --- a/proof_of_stake/src/epoched.rs +++ 
b/proof_of_stake/src/epoched.rs @@ -849,17 +849,23 @@ pub trait EpochOffset: #[cfg(test)] mod test { use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::types::address::testing::established_address_1; + use namada_core::types::dec::Dec; + use namada_core::types::{key, token}; use test_log::test; use super::*; + use crate::types::GenesisValidator; #[test] fn test_epoched_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - Epoched::::open(key_prefix); + Epoched::::open( + key_prefix, + ); let data_handler = epoched.get_data_handler(); assert!(epoched.get_last_update(&s)?.is_none()); assert!(epoched.get_oldest_epoch(&s)?.is_none()); @@ -924,7 +930,7 @@ mod test { #[test] fn test_epoched_without_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = @@ -992,11 +998,11 @@ mod test { #[test] fn test_epoched_delta_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - EpochedDelta::::open( + EpochedDelta::::open( key_prefix, ); let data_handler = epoched.get_data_handler(); @@ -1065,7 +1071,7 @@ mod test { #[test] fn test_epoched_delta_without_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; // Nothing should ever get trimmed let key_prefix = storage::Key::parse("test").unwrap(); @@ -1133,4 +1139,25 @@ mod test { Ok(()) } + + fn init_storage() -> storage_api::Result { + let mut s = TestWlStorage::default(); + crate::init_genesis( + &mut s, + &PosParams::default(), + [GenesisValidator { + address: established_address_1(), + tokens: token::Amount::native_whole(1_000), + consensus_key: 
key::testing::keypair_1().to_public(), + eth_hot_key: key::testing::keypair_3().to_public(), + eth_cold_key: key::testing::keypair_3().to_public(), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + }] + .into_iter(), + Epoch::default(), + )?; + Ok(s) + } } From d3f1eb3dfe3be49087bd4fb5b765402e5c7748c1 Mon Sep 17 00:00:00 2001 From: brentstone Date: Thu, 28 Sep 2023 09:24:19 -0600 Subject: [PATCH 018/161] changelog: add #1943 --- .../improvements/1943-refactor-past-epoch-offsets.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md diff --git a/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md b/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md new file mode 100644 index 0000000000..958b40760d --- /dev/null +++ b/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md @@ -0,0 +1,3 @@ +- Improve the Epoched data structure's bookkeeping of past + epochs, now parameterizable by PoS and governance params. 
+ ([\#1943](https://github.com/anoma/namada/pull/1943)) \ No newline at end of file From 812b2c2bd782fe015e91873b8548219f233ee01f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 28 Sep 2023 16:22:37 +0000 Subject: [PATCH 019/161] [ci] wasm checksums update --- wasm/checksums.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 614ab78e6b..8ebfc20f2b 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.b322054eef9d45e299384b2a363049ce0b0160a0c4781ca357aa59970904726c.wasm", + "tx_bond.wasm": "tx_bond.126ab38555f9dfbfb689f9774a7a028f7ffc226bd43fef118a3f32261c164fea.wasm", "tx_bridge_pool.wasm": "tx_bridge_pool.6f6ad3b95e21072af9e854e374fa0d7f691f0743da8cf52a643ed1bdb0e16611.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.9310e0a0b7c14fc7c2427040da8c91eb4067babfaaea9e3b646edbfdd09c8069.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.a75d583d6ccbd956ef9c9e85010bba75b4bf63a15a67b55dfb159be35cb5c142.wasm", "tx_ibc.wasm": "tx_ibc.54313469bcc9bcaabf661177f88cb90ac9008f542edbf686f286a02f8cdbfd41.wasm", "tx_init_account.wasm": "tx_init_account.10ee01dac5325685360119ba8e4b597d776a018ea4c9ac3534dd876ec377789e.wasm", "tx_init_proposal.wasm": "tx_init_proposal.04cad5a3a71f833a5867bca3ced54b06d34ad07f3f21877599d38581d362ba10.wasm", - "tx_init_validator.wasm": "tx_init_validator.16d53a09e5df06400849aaa161c35e4e377284692f73a71dcbd4573656da7f64.wasm", + "tx_init_validator.wasm": "tx_init_validator.964c9449ffe0fc41649decd283c905c7cd3665127274444fafc6f1347364a61e.wasm", "tx_resign_steward.wasm": "tx_resign_steward.b5d92c1bd196be0d196ef16e2ceed9a9ced7ac61d7b177fdbad208c0e784e172.wasm", "tx_reveal_pk.wasm": "tx_reveal_pk.32011ddc5316705ae005059d5916b071288a04fb4dee80854af16d61548b5c27.wasm", "tx_transfer.wasm": 
"tx_transfer.963ec4c2705377423ddc46b4ff3de63f9b625351467d89290fa771a485710c41.wasm", - "tx_unbond.wasm": "tx_unbond.7f26336db8e8cfebc04d301dc4790138fdd9bc22878fe7542c3da525a09576be.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.15a7a399d8fb79f8df959d0ddf4c193020886d1caab1e094cca10ea3aff44a72.wasm", + "tx_unbond.wasm": "tx_unbond.63baa912938704817d2b23b4f889ffa0f40ea38ca1ce1990ed08999a717998d2.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.03bcdf9d8f4ff06b87e9eb6207b709b8b99fac4035737dbfc70881fc5810e0c0.wasm", "tx_update_account.wasm": "tx_update_account.7b4e225a823449d3d8bffde197c439ad24f4f6c95cf754acf62b6373958c4486.wasm", "tx_update_steward_commission.wasm": "tx_update_steward_commission.0001b21ef3ef4f9b33afb5a5ef75a6a5427fbe221a8350cfbd81781ac18ded6e.wasm", "tx_vote_proposal.wasm": "tx_vote_proposal.727e36112fcd0753f758370dff981cc93430fe7d6f95ceb570a02a37529a7531.wasm", - "tx_withdraw.wasm": "tx_withdraw.e70485a8b79c5bff17d3b6ea96a7546cb709137c8a64606bdd1e77637157de33.wasm", + "tx_withdraw.wasm": "tx_withdraw.311993b9362f1a66acf002f15fbd9599115e41d9c5b4e1b0fa565335fae147cd.wasm", "vp_implicit.wasm": "vp_implicit.e0958c2ec06863f7bd48cd9abb67cc7557f956ce9fa6c714deba885db721fa50.wasm", "vp_masp.wasm": "vp_masp.037671b60b3e9f312c1c5fdc53d040ebfad21a646b9b1e2dac6b3e20fc0d01ec.wasm", "vp_user.wasm": "vp_user.0203fddde57bc31ef411370b628963486928a7c4d34614980d1a52616e0f617b.wasm", From bfd91f932e98a1c506cad208081a856cf9f17016 Mon Sep 17 00:00:00 2001 From: brentstone Date: Tue, 26 Sep 2023 17:30:14 -0600 Subject: [PATCH 020/161] WIP want to pass `max_proposal_period` from gov params into PoS --- proof_of_stake/src/epoched.rs | 50 +++++++++++++++++--------------- proof_of_stake/src/lib.rs | 19 ++++++++++-- proof_of_stake/src/parameters.rs | 10 +++++++ 3 files changed, 54 insertions(+), 25 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 92b336d960..2b649a53a1 100644 --- a/proof_of_stake/src/epoched.rs +++ 
b/proof_of_stake/src/epoched.rs @@ -15,8 +15,8 @@ use namada_core::ledger::storage_api::collections::{self, LazyCollection}; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::storage::{self, Epoch}; -use crate::parameters::PosParams; -use crate::read_pos_params; +use crate::parameters::{PosAndGovParams, PosParams}; +use crate::read_pos_and_gov_params; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; @@ -95,7 +95,7 @@ where &self, storage: &S, epoch: Epoch, - params: &PosParams, + params: &PosAndGovParams, ) -> storage_api::Result> where S: StorageRead, @@ -142,7 +142,7 @@ where where S: StorageWrite + StorageRead, { - let params = read_pos_params(storage)?; + let params = read_pos_and_gov_params(storage)?; self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -171,7 +171,7 @@ where fn update_data( &self, storage: &mut S, - params: &PosParams, + params: &PosAndGovParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -265,7 +265,7 @@ where LazyMap::open(key) } - fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + fn sub_past_epochs(params: &PosAndGovParams, epoch: Epoch) -> Epoch { Epoch( epoch .0 @@ -426,7 +426,7 @@ where &self, storage: &S, epoch: Epoch, - params: &PosParams, + params: &PosAndGovParams, ) -> storage_api::Result> where S: StorageRead, @@ -472,7 +472,7 @@ where where S: StorageWrite + StorageRead, { - let params = read_pos_params(storage)?; + let params = read_pos_and_gov_params(storage)?; self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -499,7 +499,7 @@ where fn update_data( &self, storage: &mut S, - params: &PosParams, + params: &PosAndGovParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -613,7 +613,7 @@ where handle.iter(storage)?.collect() } - fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { 
+ fn sub_past_epochs(params: &PosAndGovParams, epoch: Epoch) -> Epoch { Epoch( epoch .0 @@ -666,7 +666,7 @@ where )] pub struct OffsetZero; impl EpochOffset for OffsetZero { - fn value(_params: &PosParams) -> u64 { + fn value(_params: &PosAndGovParams) -> u64 { 0 } @@ -689,7 +689,7 @@ impl EpochOffset for OffsetZero { )] pub struct OffsetDefaultNumPastEpochs; impl EpochOffset for OffsetDefaultNumPastEpochs { - fn value(_params: &PosParams) -> u64 { + fn value(_params: &PosAndGovParams) -> u64 { DEFAULT_NUM_PAST_EPOCHS } @@ -712,8 +712,8 @@ impl EpochOffset for OffsetDefaultNumPastEpochs { )] pub struct OffsetPipelineLen; impl EpochOffset for OffsetPipelineLen { - fn value(params: &PosParams) -> u64 { - params.pipeline_len + fn value(params: &PosAndGovParams) -> u64 { + params.pos_params.pipeline_len } fn dyn_offset() -> DynEpochOffset { @@ -735,8 +735,8 @@ impl EpochOffset for OffsetPipelineLen { )] pub struct OffsetUnbondingLen; impl EpochOffset for OffsetUnbondingLen { - fn value(params: &PosParams) -> u64 { - params.unbonding_len + fn value(params: &PosAndGovParams) -> u64 { + params.pos_params.unbonding_len } fn dyn_offset() -> DynEpochOffset { @@ -758,8 +758,8 @@ impl EpochOffset for OffsetUnbondingLen { )] pub struct OffsetPipelinePlusUnbondingLen; impl EpochOffset for OffsetPipelinePlusUnbondingLen { - fn value(params: &PosParams) -> u64 { - params.pipeline_len + params.unbonding_len + fn value(params: &PosAndGovParams) -> u64 { + params.pos_params.pipeline_len + params.pos_params.unbonding_len } fn dyn_offset() -> DynEpochOffset { @@ -781,8 +781,8 @@ impl EpochOffset for OffsetPipelinePlusUnbondingLen { )] pub struct OffsetSlashProcessingLen; impl EpochOffset for OffsetSlashProcessingLen { - fn value(params: &PosParams) -> u64 { - params.slash_processing_epoch_offset() + fn value(params: &PosAndGovParams) -> u64 { + params.pos_params.slash_processing_epoch_offset() } fn dyn_offset() -> DynEpochOffset { @@ -804,7 +804,7 @@ impl EpochOffset for 
OffsetSlashProcessingLen { )] pub struct OffsetMaxU64; impl EpochOffset for OffsetMaxU64 { - fn value(_params: &PosParams) -> u64 { + fn value(_params: &PosAndGovParams) -> u64 { u64::MAX } @@ -831,6 +831,10 @@ pub enum DynEpochOffset { /// Offset at slash processing delay (unbonding + /// cubic_slashing_window + 1). SlashProcessingLen, + /// Offset at the max proposal period + MaxProposalPeriod, + /// Offset at the larger of max proposal period or slash processing delay + MaxProposalPeriodOrSlashProcessingLen, /// Offset of the max u64 value MaxU64, } @@ -840,8 +844,8 @@ pub enum DynEpochOffset { pub trait EpochOffset: Debug + Clone + BorshDeserialize + BorshSerialize + BorshSchema { - /// Find the value of a given offset from PoS parameters. - fn value(params: &PosParams) -> u64; + /// Find the value of a given offset from PoS and Gov parameters. + fn value(params: &PosAndGovParams) -> u64; /// Convert to [`DynEpochOffset`] fn dyn_offset() -> DynEpochOffset; } diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 0fbbf2231b..54d49c65e3 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -36,7 +36,7 @@ use namada_core::ledger::storage_api::collections::lazy_map::{ use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{ - self, ResultExt, StorageRead, StorageWrite, + self, governance, ResultExt, StorageRead, StorageWrite, }; use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::dec::Dec; @@ -46,7 +46,7 @@ use namada_core::types::key::{ pub use namada_core::types::storage::{Epoch, Key, KeySeg}; use namada_core::types::token; use once_cell::unsync::Lazy; -use parameters::PosParams; +use parameters::{PosAndGovParams, PosParams}; use rewards::PosRewardsCalculator; use storage::{ bonds_for_source_prefix, bonds_prefix, consensus_keys_key, @@ -512,6 +512,21 @@ where Ok(()) } +/// 
Read PoS and Governance parameters +pub fn read_pos_and_gov_params( + storage: &S, +) -> storage_api::Result +where + S: StorageRead, +{ + let gov_params = governance::get_parameters(storage)?; + let pos_params = read_pos_params(storage)?; + Ok(PosAndGovParams { + pos_params, + gov_params, + }) +} + /// Read PoS parameters pub fn read_pos_params(storage: &S) -> storage_api::Result where diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 8501aff379..87ec199f28 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -1,6 +1,7 @@ //! Proof-of-Stake system parameters use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::types::dec::Dec; use namada_core::types::storage::Epoch; use namada_core::types::token; @@ -74,6 +75,15 @@ impl Default for PosParams { } } +/// A struct to hold both PoS and governance parameters +#[derive(Debug, Clone)] +pub struct PosAndGovParams { + /// Pos parameters + pub pos_params: PosParams, + /// Governance parameters + pub gov_params: GovernanceParameters, +} + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum ValidationError { From 053fffbd08fabb5f25398fcb445c611aaebba2d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 27 Sep 2023 17:30:59 +0100 Subject: [PATCH 021/161] add PosParams type with added gov param --- apps/src/lib/config/genesis.rs | 10 +- .../lib/node/ledger/shell/finalize_block.rs | 10 +- apps/src/lib/node/ledger/shell/init_chain.rs | 4 +- .../shell/vote_extensions/bridge_pool_vext.rs | 2 +- core/src/ledger/storage_api/governance.rs | 15 +- .../transactions/bridge_pool_roots.rs | 6 +- .../src/protocol/transactions/votes.rs | 6 +- .../src/protocol/transactions/votes/update.rs | 1 + ethereum_bridge/src/test_utils.rs | 7 +- proof_of_stake/src/epoched.rs | 44 ++-- proof_of_stake/src/lib.rs | 65 ++++-- proof_of_stake/src/parameters.rs | 59 
+++-- proof_of_stake/src/tests.rs | 214 +++++++++--------- proof_of_stake/src/tests/state_machine.rs | 5 +- shared/src/ledger/native_vp/ibc/mod.rs | 3 + shared/src/ledger/pos/mod.rs | 4 +- tests/src/native_vp/pos.rs | 16 +- tests/src/vm_host_env/ibc.rs | 3 + wasm/wasm_source/src/tx_bond.rs | 9 +- .../src/tx_change_validator_commission.rs | 7 +- wasm/wasm_source/src/tx_unbond.rs | 9 +- wasm/wasm_source/src/tx_withdraw.rs | 9 +- wasm/wasm_source/src/vp_validator.rs | 6 +- 23 files changed, 298 insertions(+), 216 deletions(-) diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..502082af48 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -8,7 +8,7 @@ use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; use namada::ledger::eth_bridge::EthereumBridgeConfig; use namada::ledger::parameters::EpochDuration; -use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; +use namada::ledger::pos::{Dec, GenesisValidator, OwnedPosParams}; use namada::types::address::Address; use namada::types::chain::ProposalBytes; use namada::types::key::dkg_session_keys::DkgPublicKey; @@ -31,7 +31,7 @@ pub mod genesis_config { use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; use namada::ledger::parameters::EpochDuration; - use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; + use namada::ledger::pos::{Dec, GenesisValidator, OwnedPosParams}; use namada::types::address::Address; use namada::types::chain::ProposalBytes; use namada::types::key::dkg_session_keys::DkgPublicKey; @@ -665,7 +665,7 @@ pub mod genesis_config { validator_stake_threshold, } = pos_params; - let pos_params = PosParams { + let pos_params = OwnedPosParams { max_validator_slots, pipeline_len, unbonding_len, @@ -738,7 +738,7 @@ pub struct Genesis { pub established_accounts: Vec, pub 
implicit_accounts: Vec, pub parameters: Parameters, - pub pos_params: PosParams, + pub pos_params: OwnedPosParams, pub gov_params: GovernanceParameters, pub pgf_params: PgfParameters, // Ethereum bridge config @@ -1109,7 +1109,7 @@ pub fn genesis(num_validators: u64) -> Genesis { implicit_accounts, token_accounts, parameters, - pos_params: PosParams::default(), + pos_params: OwnedPosParams::default(), gov_params: GovernanceParameters::default(), pgf_params: PgfParameters::default(), ethereum_bridge_params: Some(EthereumBridgeConfig { diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..2f8f970210 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2653,8 +2653,8 @@ mod test_finalize_block { num_validators, }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); - params.unbonding_len = 4; - write_pos_params(&mut shell.wl_storage, params.clone())?; + params.owned.unbonding_len = 4; + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; let validator_set: Vec = read_consensus_validator_set_addresses_with_stake( @@ -3031,9 +3031,9 @@ mod test_finalize_block { num_validators, }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); - params.unbonding_len = 4; - params.max_validator_slots = 4; - write_pos_params(&mut shell.wl_storage, params.clone())?; + params.owned.unbonding_len = 4; + params.owned.max_validator_slots = 4; + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; // Slash pool balance let nam_address = shell.wl_storage.storage.native_token.clone(); diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d6b2efe4dd..4d7522aaba 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -4,7 +4,7 @@ use std::hash::Hash; use namada::ledger::eth_bridge::EthBridgeStatus; use 
namada::ledger::parameters::{self, Parameters}; -use namada::ledger::pos::{staking_token_address, PosParams}; +use namada::ledger::pos::{staking_token_address, OwnedPosParams}; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; use namada::ledger::storage_api::token::{ @@ -412,7 +412,7 @@ where &mut self, staking_token: &Address, validators: Vec, - pos_params: &PosParams, + pos_params: &OwnedPosParams, ) -> Result { let mut response = response::InitChain::default(); // PoS system depends on epoch being initialized. Write the total diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 002bd18904..880409bef8 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -336,7 +336,7 @@ mod test_bp_vote_extensions { // change pipeline length to 1 let mut params = shell.wl_storage.pos_queries().get_pos_params(); - params.pipeline_len = 1; + params.owned.pipeline_len = 1; let consensus_key = gen_keypair(); let hot_key = gen_secp256k1_keypair(); diff --git a/core/src/ledger/storage_api/governance.rs b/core/src/ledger/storage_api/governance.rs index e03be937b5..e5b27a5d02 100644 --- a/core/src/ledger/storage_api/governance.rs +++ b/core/src/ledger/storage_api/governance.rs @@ -245,9 +245,7 @@ where let min_proposal_voting_period: u64 = storage.read(&key)?.expect("Parameter should be definied."); - let key = governance_keys::get_max_proposal_period_key(); - let max_proposal_period: u64 = - storage.read(&key)?.expect("Parameter should be definied."); + let max_proposal_period: u64 = get_max_proposal_period(storage)?; Ok(GovernanceParameters { min_proposal_fund, @@ -258,3 +256,14 @@ where min_proposal_grace_epochs, }) } + +/// Get governance "max_proposal_period" parameter +pub fn get_max_proposal_period(storage: &S) -> storage_api::Result 
+where + S: storage_api::StorageRead, +{ + let key = governance_keys::get_max_proposal_period_key(); + let max_proposal_period: u64 = + storage.read(&key)?.expect("Parameter should be definied."); + Ok(max_proposal_period) +} diff --git a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 3271efeed5..b8a9d22303 100644 --- a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -235,7 +235,7 @@ mod test_apply_bp_roots_to_storage { use namada_core::types::storage::Key; use namada_core::types::vote_extensions::bridge_pool_roots; use namada_core::types::voting_power::FractionalVotingPower; - use namada_proof_of_stake::parameters::PosParams; + use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::write_pos_params; use super::*; @@ -757,11 +757,11 @@ mod test_apply_bp_roots_to_storage { ); // update the pos params - let params = PosParams { + let params = OwnedPosParams { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params).expect("Test failed"); + write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( diff --git a/ethereum_bridge/src/protocol/transactions/votes.rs b/ethereum_bridge/src/protocol/transactions/votes.rs index c3a82bd370..cc029e28f5 100644 --- a/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/ethereum_bridge/src/protocol/transactions/votes.rs @@ -189,7 +189,7 @@ mod tests { use namada_core::types::storage::BlockHeight; use namada_core::types::{address, token}; - use namada_proof_of_stake::parameters::PosParams; + use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::write_pos_params; use super::*; @@ -321,11 +321,11 @@ mod tests { ); // update the pos params - let params = PosParams { + let params = OwnedPosParams 
{ pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params).expect("Test failed"); + write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs index 928bb4d7f8..a98be1859d 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -263,6 +263,7 @@ mod tests { votes, total_stake, } = self; + let keys = vote_tallies::Keys::from(event); let seen_voting_power: token::Amount = votes .iter() diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index 9c24e9edfa..8091566f10 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -6,6 +6,7 @@ use std::num::NonZeroU64; use borsh::BorshSerialize; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::eth_bridge::storage::whitelist; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; use namada_core::ledger::storage::testing::{TestStorage, TestWlStorage}; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; @@ -16,7 +17,7 @@ use namada_core::types::keccak::KeccakHash; use namada_core::types::key::{self, protocol_pk_key, RefTo}; use namada_core::types::storage::{BlockHeight, Key}; use namada_core::types::token; -use namada_proof_of_stake::parameters::PosParams; +use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::GenesisValidator; use namada_proof_of_stake::{ @@ -212,9 +213,11 @@ pub fn init_storage_with_validators( }) .collect(); + let gov_params = GovernanceParameters::default(); + 
gov_params.init_storage(wl_storage).unwrap(); namada_proof_of_stake::init_genesis( wl_storage, - &PosParams::default(), + &OwnedPosParams::default(), validators.into_iter(), 0.into(), ) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 2b649a53a1..5c119fee3c 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -15,8 +15,8 @@ use namada_core::ledger::storage_api::collections::{self, LazyCollection}; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::storage::{self, Epoch}; -use crate::parameters::{PosAndGovParams, PosParams}; -use crate::read_pos_and_gov_params; +use crate::parameters::PosParams; +use crate::read_pos_params; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; @@ -95,7 +95,7 @@ where &self, storage: &S, epoch: Epoch, - params: &PosAndGovParams, + params: &PosParams, ) -> storage_api::Result> where S: StorageRead, @@ -142,7 +142,7 @@ where where S: StorageWrite + StorageRead, { - let params = read_pos_and_gov_params(storage)?; + let params = read_pos_params(storage)?; self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -171,7 +171,7 @@ where fn update_data( &self, storage: &mut S, - params: &PosAndGovParams, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -265,7 +265,7 @@ where LazyMap::open(key) } - fn sub_past_epochs(params: &PosAndGovParams, epoch: Epoch) -> Epoch { + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { Epoch( epoch .0 @@ -426,7 +426,7 @@ where &self, storage: &S, epoch: Epoch, - params: &PosAndGovParams, + params: &PosParams, ) -> storage_api::Result> where S: StorageRead, @@ -472,7 +472,7 @@ where where S: StorageWrite + StorageRead, { - let params = read_pos_and_gov_params(storage)?; + let params = read_pos_params(storage)?; self.update_data(storage, ¶ms, current_epoch)?; 
self.set_at_epoch(storage, value, current_epoch, offset) } @@ -499,7 +499,7 @@ where fn update_data( &self, storage: &mut S, - params: &PosAndGovParams, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -613,7 +613,7 @@ where handle.iter(storage)?.collect() } - fn sub_past_epochs(params: &PosAndGovParams, epoch: Epoch) -> Epoch { + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { Epoch( epoch .0 @@ -666,7 +666,7 @@ where )] pub struct OffsetZero; impl EpochOffset for OffsetZero { - fn value(_params: &PosAndGovParams) -> u64 { + fn value(_params: &PosParams) -> u64 { 0 } @@ -689,7 +689,7 @@ impl EpochOffset for OffsetZero { )] pub struct OffsetDefaultNumPastEpochs; impl EpochOffset for OffsetDefaultNumPastEpochs { - fn value(_params: &PosAndGovParams) -> u64 { + fn value(_params: &PosParams) -> u64 { DEFAULT_NUM_PAST_EPOCHS } @@ -712,8 +712,8 @@ impl EpochOffset for OffsetDefaultNumPastEpochs { )] pub struct OffsetPipelineLen; impl EpochOffset for OffsetPipelineLen { - fn value(params: &PosAndGovParams) -> u64 { - params.pos_params.pipeline_len + fn value(params: &PosParams) -> u64 { + params.pipeline_len } fn dyn_offset() -> DynEpochOffset { @@ -735,8 +735,8 @@ impl EpochOffset for OffsetPipelineLen { )] pub struct OffsetUnbondingLen; impl EpochOffset for OffsetUnbondingLen { - fn value(params: &PosAndGovParams) -> u64 { - params.pos_params.unbonding_len + fn value(params: &PosParams) -> u64 { + params.unbonding_len } fn dyn_offset() -> DynEpochOffset { @@ -758,8 +758,8 @@ impl EpochOffset for OffsetUnbondingLen { )] pub struct OffsetPipelinePlusUnbondingLen; impl EpochOffset for OffsetPipelinePlusUnbondingLen { - fn value(params: &PosAndGovParams) -> u64 { - params.pos_params.pipeline_len + params.pos_params.unbonding_len + fn value(params: &PosParams) -> u64 { + params.pipeline_len + params.unbonding_len } fn dyn_offset() -> DynEpochOffset { @@ -781,8 +781,8 @@ impl EpochOffset for OffsetPipelinePlusUnbondingLen { )] 
pub struct OffsetSlashProcessingLen; impl EpochOffset for OffsetSlashProcessingLen { - fn value(params: &PosAndGovParams) -> u64 { - params.pos_params.slash_processing_epoch_offset() + fn value(params: &PosParams) -> u64 { + params.slash_processing_epoch_offset() } fn dyn_offset() -> DynEpochOffset { @@ -804,7 +804,7 @@ impl EpochOffset for OffsetSlashProcessingLen { )] pub struct OffsetMaxU64; impl EpochOffset for OffsetMaxU64 { - fn value(_params: &PosAndGovParams) -> u64 { + fn value(_params: &PosParams) -> u64 { u64::MAX } @@ -845,7 +845,7 @@ pub trait EpochOffset: Debug + Clone + BorshDeserialize + BorshSerialize + BorshSchema { /// Find the value of a given offset from PoS and Gov parameters. - fn value(params: &PosAndGovParams) -> u64; + fn value(params: &PosParams) -> u64; /// Convert to [`DynEpochOffset`] fn dyn_offset() -> DynEpochOffset; } diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 54d49c65e3..3645a84d05 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -46,7 +46,7 @@ use namada_core::types::key::{ pub use namada_core::types::storage::{Epoch, Key, KeySeg}; use namada_core::types::token; use once_cell::unsync::Lazy; -use parameters::{PosAndGovParams, PosParams}; +use parameters::{OwnedPosParams, PosParams}; use rewards::PosRewardsCalculator; use storage::{ bonds_for_source_prefix, bonds_prefix, consensus_keys_key, @@ -394,10 +394,10 @@ pub fn delegator_rewards_products_handle( RewardsProducts::open(key) } -/// Init genesis +/// Init genesis. Requires that the governance parameters are initialized. 
pub fn init_genesis( storage: &mut S, - params: &PosParams, + params: &OwnedPosParams, validators: impl Iterator + Clone, current_epoch: namada_core::types::storage::Epoch, ) -> storage_api::Result<()> @@ -405,7 +405,8 @@ where S: StorageRead + StorageWrite, { tracing::debug!("Initializing PoS genesis"); - write_pos_params(storage, params.clone())?; + write_pos_params(storage, params)?; + let params = read_non_pos_owned_params(storage, params.clone())?; let mut total_bonded = token::Amount::default(); consensus_validator_set_handle().init(storage, current_epoch)?; @@ -433,7 +434,7 @@ where // validator data insert_validator_into_validator_set( storage, - params, + ¶ms, &address, tokens, current_epoch, @@ -512,36 +513,38 @@ where Ok(()) } -/// Read PoS and Governance parameters -pub fn read_pos_and_gov_params( - storage: &S, -) -> storage_api::Result +/// Read PoS parameters +pub fn read_pos_params(storage: &S) -> storage_api::Result where S: StorageRead, { - let gov_params = governance::get_parameters(storage)?; - let pos_params = read_pos_params(storage)?; - Ok(PosAndGovParams { - pos_params, - gov_params, - }) + let params = storage + .read(¶ms_key()) + .transpose() + .expect("PosParams should always exist in storage after genesis")?; + read_non_pos_owned_params(storage, params) } -/// Read PoS parameters -pub fn read_pos_params(storage: &S) -> storage_api::Result +/// Read non-PoS-owned parameters to add them to `OwnedPosParams` to construct +/// `PosParams`. 
+pub fn read_non_pos_owned_params( + storage: &S, + owned: OwnedPosParams, +) -> storage_api::Result where S: StorageRead, { - storage - .read(¶ms_key()) - .transpose() - .expect("PosParams should always exist in storage after genesis") + let max_proposal_period = governance::get_max_proposal_period(storage)?; + Ok(PosParams { + owned, + max_proposal_period, + }) } /// Write PoS parameters pub fn write_pos_params( storage: &mut S, - params: PosParams, + params: &OwnedPosParams, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, @@ -4057,3 +4060,21 @@ where } Ok(slashes) } + +/// Init PoS genesis wrapper helper that also initializes gov params that are +/// used in PoS with default values. +#[cfg(feature = "testing")] +pub fn test_init_genesis( + storage: &mut S, + owned: OwnedPosParams, + validators: impl Iterator + Clone, + current_epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result +where + S: StorageRead + StorageWrite, +{ + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(storage)?; + crate::init_genesis(storage, &owned, validators, current_epoch)?; + crate::read_non_pos_owned_params(storage, owned) +} diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 87ec199f28..0776b2791b 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -8,10 +8,21 @@ use namada_core::types::token; use namada_core::types::uint::Uint; use thiserror::Error; -/// Proof-of-Stake system parameters, set at genesis and can only be changed via -/// governance +/// Proof-of-Stake system parameters. This includes parameters that are used in +/// PoS but are read from other accounts storage (governance). #[derive(Debug, Clone, BorshDeserialize, BorshSerialize)] pub struct PosParams { + /// PoS-owned params + pub owned: OwnedPosParams, + /// Governance param - Maximum proposal voting period in epochs. 
+ /// This param is stored in governance. + pub max_proposal_period: u64, +} + +/// Proof-of-Stake system parameters owned by the PoS address, set at genesis +/// and can only be changed via governance +#[derive(Debug, Clone, BorshDeserialize, BorshSerialize)] +pub struct OwnedPosParams { /// A maximum number of consensus validators pub max_validator_slots: u64, /// Any change applied during an epoch `n` will become active at the @@ -50,6 +61,17 @@ pub struct PosParams { } impl Default for PosParams { + fn default() -> Self { + let owned = OwnedPosParams::default(); + let gov = GovernanceParameters::default(); + Self { + owned, + max_proposal_period: gov.max_proposal_period, + } + } +} + +impl Default for OwnedPosParams { fn default() -> Self { Self { max_validator_slots: 100, @@ -75,15 +97,6 @@ impl Default for PosParams { } } -/// A struct to hold both PoS and governance parameters -#[derive(Debug, Clone)] -pub struct PosAndGovParams { - /// Pos parameters - pub pos_params: PosParams, - /// Governance parameters - pub gov_params: GovernanceParameters, -} - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum ValidationError { @@ -112,7 +125,7 @@ const MAX_TOTAL_VOTING_POWER: i64 = i64::MAX / 8; /// Assuming token amount is `u64` in micro units. const TOKEN_MAX_AMOUNT: u64 = u64::MAX / TOKENS_PER_NAM; -impl PosParams { +impl OwnedPosParams { /// Validate PoS parameters values. Returns an empty list if the values are /// valid. #[must_use] @@ -183,6 +196,24 @@ impl PosParams { let end = infraction_epoch + self.cubic_slashing_window_length; (start, end) } + + /// A test helper to add the default gov params to PoS params. 
+ #[cfg(any(test, feature = "testing"))] + pub fn with_default_gov_params(self) -> PosParams { + let gov = GovernanceParameters::default(); + PosParams { + owned: self, + max_proposal_period: gov.max_proposal_period, + } + } +} + +impl std::ops::Deref for PosParams { + type Target = OwnedPosParams; + + fn deref(&self) -> &Self::Target { + &self.owned + } } #[cfg(test)] @@ -224,8 +255,8 @@ pub mod testing { unbonding_len in pipeline_len + 1..pipeline_len + 8, pipeline_len in Just(pipeline_len), tm_votes_per_token in 1..10_001_i128) - -> PosParams { - PosParams { + -> OwnedPosParams { + OwnedPosParams { max_validator_slots, pipeline_len, unbonding_len, diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index b7463c8ea5..4465bdc687 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -30,7 +30,7 @@ use proptest::test_runner::Config; use test_log::test; use crate::parameters::testing::arb_pos_params; -use crate::parameters::PosParams; +use crate::parameters::{OwnedPosParams, PosParams}; use crate::types::{ into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, ConsensusValidator, GenesisValidator, Position, ReverseOrdTokenAmount, @@ -41,36 +41,35 @@ use crate::{ become_validator, below_capacity_validator_set_handle, bond_handle, bond_tokens, bonds_and_unbonds, consensus_validator_set_handle, copy_validator_sets_and_positions, find_validator_by_raw_hash, - get_num_consensus_validators, init_genesis, - insert_validator_into_validator_set, is_validator, process_slashes, - purge_validator_sets_for_old_epoch, + get_num_consensus_validators, insert_validator_into_validator_set, + is_validator, process_slashes, purge_validator_sets_for_old_epoch, read_below_capacity_validator_set_addresses_with_stake, read_below_threshold_validator_set_addresses, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_delta_value, read_validator_stake, slash, - staking_token_address, 
store_total_consensus_stake, total_deltas_handle, - unbond_handle, unbond_tokens, unjail_validator, update_validator_deltas, - update_validator_set, validator_consensus_key_handle, - validator_set_positions_handle, validator_set_update_tendermint, - validator_slashes_handle, validator_state_handle, withdraw_tokens, - write_validator_address_raw_hash, BecomeValidator, - STORE_VALIDATOR_SETS_LEN, + staking_token_address, store_total_consensus_stake, test_init_genesis, + total_deltas_handle, unbond_handle, unbond_tokens, unjail_validator, + update_validator_deltas, update_validator_set, + validator_consensus_key_handle, validator_set_positions_handle, + validator_set_update_tendermint, validator_slashes_handle, + validator_state_handle, withdraw_tokens, write_validator_address_raw_hash, + BecomeValidator, STORE_VALIDATOR_SETS_LEN, }; proptest! { - // Generate arb valid input for `test_init_genesis_aux` + // Generate arb valid input for `test_test_init_genesis_aux` #![proptest_config(Config { cases: 1, .. Config::default() })] #[test] - fn test_init_genesis( + fn test_test_init_genesis( (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..10), start_epoch in (0_u64..1000).prop_map(Epoch), ) { - test_init_genesis_aux(pos_params, start_epoch, genesis_validators) + test_test_init_genesis_aux(pos_params, start_epoch, genesis_validators) } } @@ -144,7 +143,7 @@ proptest! 
{ fn arb_params_and_genesis_validators( num_max_validator_slots: Option, val_size: Range, -) -> impl Strategy)> { +) -> impl Strategy)> { let params = arb_pos_params(num_max_validator_slots); params.prop_flat_map(move |params| { let validators = arb_genesis_validators( @@ -156,7 +155,7 @@ fn arb_params_and_genesis_validators( } fn test_slashes_with_unbonding_params() --> impl Strategy, u64)> { +-> impl Strategy, u64)> { let params = arb_pos_params(Some(5)); params.prop_flat_map(|params| { let unbond_delay = 0..(params.slash_processing_epoch_offset() * 2); @@ -168,8 +167,8 @@ fn test_slashes_with_unbonding_params() } /// Test genesis initialization -fn test_init_genesis_aux( - params: PosParams, +fn test_test_init_genesis_aux( + params: OwnedPosParams, start_epoch: Epoch, mut validators: Vec, ) { @@ -181,8 +180,13 @@ fn test_init_genesis_aux( s.storage.block.epoch = start_epoch; validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - init_genesis(&mut s, ¶ms, validators.clone().into_iter(), start_epoch) - .unwrap(); + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + start_epoch, + ) + .unwrap(); let mut bond_details = bonds_and_unbonds(&s, None, None).unwrap(); assert!(bond_details.iter().all(|(_id, details)| { @@ -250,7 +254,7 @@ fn test_init_genesis_aux( /// Test bonding /// NOTE: copy validator sets each time we advance the epoch -fn test_bonds_aux(params: PosParams, validators: Vec) { +fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { // This can be useful for debugging: // params.pipeline_len = 2; // params.unbonding_len = 4; @@ -260,9 +264,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Genesis let start_epoch = s.storage.block.epoch; let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -801,7 +805,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { /// Test validator 
initialization. fn test_become_validator_aux( - params: PosParams, + params: OwnedPosParams, new_validator: Address, new_validator_consensus_key: SecretKey, validators: Vec, @@ -815,9 +819,9 @@ fn test_become_validator_aux( // Genesis let mut current_epoch = dbg!(s.storage.block.epoch); - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -949,7 +953,7 @@ fn test_become_validator_aux( } fn test_slashes_with_unbonding_aux( - mut params: PosParams, + mut params: OwnedPosParams, validators: Vec, unbond_delay: u64, ) { @@ -975,9 +979,9 @@ fn test_slashes_with_unbonding_aux( // Genesis // let start_epoch = s.storage.block.epoch; let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -1128,7 +1132,7 @@ fn test_validator_raw_hash() { fn test_validator_sets() { let mut s = TestWlStorage::default(); // Only 3 consensus validator slots - let params = PosParams { + let params = OwnedPosParams { max_validator_slots: 3, ..Default::default() }; @@ -1145,39 +1149,6 @@ fn test_validator_sets() { res }; - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas( - s, - ¶ms, - addr, - stake.change(), - epoch, - params.pipeline_len, - ) - .unwrap(); - - // Set their consensus key (needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - // Create genesis validators let ((val1, pk1), stake1) = (gen_validator(), token::Amount::native_whole(1)); @@ -1204,9 +1175,9 @@ fn test_validator_sets() { let start_epoch = Epoch::default(); let epoch = 
start_epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, [ GenesisValidator { address: val1.clone(), @@ -1246,6 +1217,39 @@ fn test_validator_sets() { ) .unwrap(); + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas( + s, + ¶ms, + addr, + stake.change(), + epoch, + params.pipeline_len, + ) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + // Advance to EPOCH 1 // // We cannot call `get_tendermint_set_updates` for the genesis state as @@ -1804,7 +1808,7 @@ fn test_validator_sets() { fn test_validator_sets_swap() { let mut s = TestWlStorage::default(); // Only 2 consensus validator slots - let params = PosParams { + let params = OwnedPosParams { max_validator_slots: 2, // Set the stake threshold to 0 so no validators are in the // below-threshold set @@ -1826,39 +1830,6 @@ fn test_validator_sets_swap() { res }; - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas( - s, - ¶ms, - addr, - stake.change(), - epoch, - params.pipeline_len, - ) - .unwrap(); - - // Set their consensus key (needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - // Start with two genesis validators, one with 1 voting power and other 0 let epoch = Epoch::default(); // 1M voting power @@ -1874,9 
+1845,9 @@ fn test_validator_sets_swap() { println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, [ GenesisValidator { address: val1, @@ -1916,6 +1887,39 @@ fn test_validator_sets_swap() { ) .unwrap(); + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas( + s, + ¶ms, + addr, + stake.change(), + epoch, + params.pipeline_len, + ) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + // Advance to EPOCH 1 let epoch = advance_epoch(&mut s, ¶ms); let pipeline_epoch = epoch + params.pipeline_len; @@ -2043,7 +2047,7 @@ fn get_tendermint_set_updates( } /// Advance to the next epoch. Returns the new epoch. 
-fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { +fn advance_epoch(s: &mut TestWlStorage, params: &OwnedPosParams) -> Epoch { s.storage.block.epoch = s.storage.block.epoch.next(); let current_epoch = s.storage.block.epoch; store_total_consensus_stake(s, current_epoch).unwrap(); @@ -2131,7 +2135,7 @@ fn arb_genesis_validators( } fn test_unjail_validator_aux( - params: PosParams, + params: OwnedPosParams, mut validators: Vec, ) { println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); @@ -2153,9 +2157,9 @@ fn test_unjail_validator_aux( // Genesis let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index 6c9968c519..05b9b26345 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -151,9 +151,9 @@ impl StateMachineTest for ConcretePosState { .collect::>() ); let mut s = TestWlStorage::default(); - crate::init_genesis( + crate::test_init_genesis( &mut s, - &initial_state.params, + initial_state.params.owned.clone(), initial_state.genesis_validators.clone().into_iter(), initial_state.epoch, ) @@ -1197,6 +1197,7 @@ impl ReferenceStateMachine for AbstractPosState { println!("\nInitializing abstract state machine"); arb_params_and_genesis_validators(Some(8), 8..10) .prop_map(|(params, genesis_validators)| { + let params = params.with_default_gov_params(); let epoch = Epoch::default(); let mut state = Self { epoch, diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 3b6521905b..818aa4df79 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -293,6 +293,7 @@ mod tests { use borsh::BorshSerialize; use namada_core::ledger::gas::TxGasMeter; + use 
namada_core::ledger::governance::parameters::GovernanceParameters; use prost::Message; use sha2::Digest; @@ -409,6 +410,8 @@ mod tests { // initialize the storage ibc::init_genesis_storage(&mut wl_storage); + let gov_params = GovernanceParameters::default(); + gov_params.init_storage(&mut wl_storage).unwrap(); pos::init_genesis_storage( &mut wl_storage, &PosParams::default(), diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index a61a721e4f..d47e5cc884 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -11,7 +11,7 @@ pub use namada_core::types::dec::Dec; pub use namada_core::types::key::common; pub use namada_core::types::token; pub use namada_proof_of_stake; -pub use namada_proof_of_stake::parameters::PosParams; +pub use namada_proof_of_stake::parameters::{OwnedPosParams, PosParams}; pub use namada_proof_of_stake::pos_queries::*; pub use namada_proof_of_stake::storage::*; pub use namada_proof_of_stake::{staking_token_address, types}; @@ -42,7 +42,7 @@ pub fn into_tm_voting_power( /// Initialize storage in the genesis block. pub fn init_genesis_storage( storage: &mut S, - params: &PosParams, + params: &OwnedPosParams, validators: impl Iterator + Clone, current_epoch: Epoch, ) where diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index 344a75d4e3..4140c08b7f 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -95,9 +95,9 @@ //! - add slashes //! - add rewards -use namada::ledger::pos::namada_proof_of_stake::init_genesis; -use namada::proof_of_stake::parameters::PosParams; +use namada::proof_of_stake::parameters::{OwnedPosParams, PosParams}; use namada::proof_of_stake::storage::GenesisValidator; +use namada::proof_of_stake::test_init_genesis as init_genesis; use namada::types::storage::Epoch; use crate::tx::tx_host_env; @@ -106,9 +106,9 @@ use crate::tx::tx_host_env; /// parameters. 
pub fn init_pos( genesis_validators: &[GenesisValidator], - params: &PosParams, + params: &OwnedPosParams, start_epoch: Epoch, -) { +) -> PosParams { tx_host_env::init(); tx_host_env::with(|tx_env| { @@ -130,9 +130,9 @@ pub fn init_pos( // .storage // .init_genesis(params, genesis_validators.iter(), start_epoch) // .unwrap(); - init_genesis( + let params = init_genesis( &mut tx_env.wl_storage, - params, + params.clone(), genesis_validators.iter().cloned(), start_epoch, ) @@ -140,7 +140,8 @@ pub fn init_pos( // Commit changes in WL to genesis state tx_env.commit_genesis(); - }); + params + }) } #[cfg(test)] @@ -334,6 +335,7 @@ mod tests { // We're starting from an empty state let state = vec![]; let epoch = Epoch(epoch); + let params = params.with_default_gov_params(); arb_valid_pos_action(&state).prop_map(move |valid_action| { Self { epoch, diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index b8d88961ea..9cd1caebdb 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -84,6 +84,7 @@ use namada::types::time::DurationSecs; use namada::types::token::{self, Amount, DenominatedAmount}; use namada::vm::{wasm, WasmCacheRwAccess}; use namada_core::ledger::gas::TxGasMeter; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_test_utils::TestWasms; use namada_tx_prelude::BorshSerialize; @@ -211,6 +212,8 @@ pub fn init_storage() -> (Address, Address) { tx_host_env::with(|env| { ibc::init_genesis_storage(&mut env.wl_storage); + let gov_params = GovernanceParameters::default(); + gov_params.init_storage(&mut env.wl_storage).unwrap(); pos::init_genesis_storage( &mut env.wl_storage, &PosParams::default(), diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 3453747161..509a50f822 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -17,7 +17,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use 
std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{GenesisValidator, OwnedPosParams, PosVP}; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, @@ -64,10 +64,10 @@ mod tests { initial_stake: token::Amount, bond: transaction::pos::Bond, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { + let pos_params = OwnedPosParams { validator_stake_threshold: token::Amount::default(), ..pos_params }; @@ -91,7 +91,8 @@ mod tests { max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { if let Some(source) = &bond.source { diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index c1e1b35226..b8db50ecb0 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -19,7 +19,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use std::cmp; - use namada::ledger::pos::{PosParams, PosVP}; + use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::validator_commission_rate_handle; use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::types::storage::Epoch; @@ -63,7 +63,7 @@ mod tests { max_change: Dec, commission_change: transaction::pos::CommissionChange, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { let consensus_key = key::testing::keypair_1().ref_to(); let eth_hot_key = key::common::PublicKey::Secp256k1( @@ -82,7 +82,8 @@ mod tests { eth_cold_key, }]; - init_pos(&genesis_validators[..], 
&pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let tx_code = vec![]; let tx_data = commission_change.try_to_vec().unwrap(); diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 7e08c0dcda..5e1eaeb2a3 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -17,7 +17,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{GenesisValidator, OwnedPosParams, PosVP}; use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, @@ -62,10 +62,10 @@ mod tests { initial_stake: token::Amount, unbond: transaction::pos::Unbond, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { + let pos_params = OwnedPosParams { validator_stake_threshold: token::Amount::default(), ..pos_params }; @@ -96,7 +96,8 @@ mod tests { max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { let native_token = tx_env.wl_storage.storage.native_token.clone(); diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index c8fa649c43..e288282c42 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -20,7 +20,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{GenesisValidator, OwnedPosParams, PosVP}; use namada::proof_of_stake::unbond_handle; use namada::types::dec::Dec; use 
namada::types::storage::Epoch; @@ -67,10 +67,10 @@ mod tests { unbonded_amount: token::Amount, withdraw: transaction::pos::Withdraw, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { + let pos_params = OwnedPosParams { validator_stake_threshold: token::Amount::default(), ..pos_params }; @@ -100,7 +100,8 @@ mod tests { max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { let native_token = tx_env.wl_storage.storage.native_token.clone(); diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index f929a8a0d1..77a8c76d66 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -191,7 +191,7 @@ fn validate_tx( #[cfg(test)] mod tests { use address::testing::arb_non_internal_address; - use namada::ledger::pos::{GenesisValidator, PosParams}; + use namada::ledger::pos::{GenesisValidator, OwnedPosParams}; use namada::proto::{Code, Data, Signature}; use namada::types::dec::Dec; use namada::types::storage::Epoch; @@ -421,7 +421,7 @@ mod tests { #[test] fn test_unsigned_pos_action_rejected() { // Init PoS genesis - let pos_params = PosParams::default(); + let pos_params = OwnedPosParams::default(); let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); @@ -504,7 +504,7 @@ mod tests { #[test] fn test_signed_pos_action_accepted() { // Init PoS genesis - let pos_params = PosParams::default(); + let pos_params = OwnedPosParams::default(); let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = 
key::testing::keypair_2().ref_to(); From bfe4c97c0bebd489e343ec6755722dde9846b42b Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 27 Sep 2023 18:15:19 -0600 Subject: [PATCH 022/161] pos/types: configure number of past epochs kept for PoS data --- proof_of_stake/src/epoched.rs | 107 +++++++++++++++++++++++++++++++++- proof_of_stake/src/types.rs | 6 +- 2 files changed, 109 insertions(+), 4 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 5c119fee3c..25dcbcb7f3 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::marker::PhantomData; -use std::ops; +use std::{cmp, ops}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::ledger::storage_api; @@ -813,6 +813,106 @@ impl EpochOffset for OffsetMaxU64 { } } +/// Offset at max proposal period. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriod; +impl EpochOffset for OffsetMaxProposalPeriod { + fn value(params: &PosParams) -> u64 { + params.max_proposal_period + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriod + } +} + +/// Offset at the max proposal period, plus the default num past epochs. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodPlus; +impl EpochOffset for OffsetMaxProposalPeriodPlus { + fn value(params: &PosParams) -> u64 { + params.max_proposal_period + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodPlus + } +} + +/// Offset at the larger of the slash processing length and the max proposal +/// period. 
+#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodOrSlashProcessingLen; +impl EpochOffset for OffsetMaxProposalPeriodOrSlashProcessingLen { + fn value(params: &PosParams) -> u64 { + cmp::max( + params.slash_processing_epoch_offset(), + params.max_proposal_period, + ) + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodOrSlashProcessingLen + } +} + +/// Offset at the larger of the slash processing length and the max proposal +/// period, plus the default num past epochs. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodOrSlashProcessingLenPlus; +impl EpochOffset for OffsetMaxProposalPeriodOrSlashProcessingLenPlus { + fn value(params: &PosParams) -> u64 { + cmp::max( + params.slash_processing_epoch_offset(), + params.max_proposal_period, + ) + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodOrSlashProcessingLenPlus + } +} + /// Offset length dynamic choice. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum DynEpochOffset { @@ -833,8 +933,13 @@ pub enum DynEpochOffset { SlashProcessingLen, /// Offset at the max proposal period MaxProposalPeriod, + /// Offset at the max proposal period plus the default num past epochs + MaxProposalPeriodPlus, /// Offset at the larger of max proposal period or slash processing delay MaxProposalPeriodOrSlashProcessingLen, + /// Offset at the larger of max proposal period or slash processing delay, + /// plus the default num past epochs + MaxProposalPeriodOrSlashProcessingLenPlus, /// Offset of the max u64 value MaxU64, } diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 2ec8e4fd86..88881aae84 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -77,7 +77,7 @@ pub type BelowCapacityValidatorSet = pub type ConsensusValidatorSets = crate::epoched::NestedEpoched< ConsensusValidatorSet, crate::epoched::OffsetPipelineLen, - crate::epoched::OffsetDefaultNumPastEpochs, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched below-capacity validator sets. @@ -98,7 +98,7 @@ pub type TotalConsensusStakes = crate::epoched::Epoched< pub type ValidatorDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - crate::epoched::OffsetSlashProcessingLen, + crate::epoched::OffsetMaxProposalPeriodOrSlashProcessingLenPlus, >; /// Epoched total deltas. @@ -141,7 +141,7 @@ pub type ValidatorSlashes = NestedMap; pub type EpochedSlashes = crate::epoched::NestedEpoched< ValidatorSlashes, crate::epoched::OffsetUnbondingLen, - crate::epoched::OffsetSlashProcessingLen, + crate::epoched::OffsetSlashProcessingLen, /* TODO: should this be slash procesing + cubic window? 
*/ >; /// Epoched validator's unbonds From eaed1895931d7fde66164d11d11bf1fbddaaa9dc Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 27 Sep 2023 18:21:10 -0600 Subject: [PATCH 023/161] new impl for purging old validator sets --- .../lib/node/ledger/shell/finalize_block.rs | 178 +++++++++++++++++- benches/lib.rs | 9 +- proof_of_stake/src/epoched.rs | 110 ++++++++++- proof_of_stake/src/lib.rs | 46 ++--- proof_of_stake/src/tests.rs | 12 +- 5 files changed, 312 insertions(+), 43 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 2f8f970210..739d803bc2 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -100,6 +100,7 @@ where namada_proof_of_stake::read_pos_params(&self.wl_storage)?; namada_proof_of_stake::copy_validator_sets_and_positions( &mut self.wl_storage, + &pos_params, current_epoch, current_epoch + pos_params.pipeline_len, )?; @@ -107,10 +108,6 @@ where &mut self.wl_storage, current_epoch, )?; - namada_proof_of_stake::purge_validator_sets_for_old_epoch( - &mut self.wl_storage, - current_epoch, - )?; } // Invariant: Has to be applied before `record_slashes_from_evidence` @@ -3823,6 +3820,179 @@ mod test_finalize_block { Ok(()) } + #[test] + fn test_purge_validator_information() -> storage_api::Result<()> { + // Setup the network with pipeline_len = 2, unbonding_len = 4 + let num_validators = 4_u64; + let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { + last_height: 0, + num_validators, + }); + let mut params = read_pos_params(&shell.wl_storage).unwrap(); + params.owned.unbonding_len = 4; + // params.owned.max_validator_slots = 3; + // params.owned.validator_stake_threshold = token::Amount::zero(); + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + + let max_proposal_period = params.max_proposal_period; + let default_past_epochs = 2; + let consensus_val_set_len = max_proposal_period + 
default_past_epochs; + + let consensus_val_set = + namada_proof_of_stake::consensus_validator_set_handle(); + // let below_cap_val_set = + // namada_proof_of_stake::below_capacity_validator_set_handle(); + let validator_positions = + namada_proof_of_stake::validator_set_positions_handle(); + let all_validator_addresses = + namada_proof_of_stake::validator_addresses_handle(); + + let consensus_set: Vec = + read_consensus_validator_set_addresses_with_stake( + &shell.wl_storage, + Epoch::default(), + ) + .unwrap() + .into_iter() + .collect(); + let val1 = consensus_set[0].clone(); + let pkh1 = get_pkh_from_address( + &shell.wl_storage, + ¶ms, + val1.address, + Epoch::default(), + ); + + // Finalize block 1 + next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); + + let votes = get_default_true_votes(&shell.wl_storage, Epoch::default()); + assert!(!votes.is_empty()); + + let check_is_data = |storage: &WlStorage<_, _>, + start: Epoch, + end: Epoch| { + for ep in Epoch::iter_bounds_inclusive(start, end) { + assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); + // assert!(!below_cap_val_set.at(&ep).is_empty(storage). 
+ // unwrap()); + assert!( + !validator_positions.at(&ep).is_empty(storage).unwrap() + ); + assert!( + !all_validator_addresses.at(&ep).is_empty(storage).unwrap() + ); + } + }; + + // Check that there is validator data for epochs 0 - pipeline_len + check_is_data(&shell.wl_storage, Epoch(0), Epoch(params.pipeline_len)); + + // Advance to epoch `default_past_epochs` + let mut current_epoch = Epoch(0); + for _ in 0..default_past_epochs { + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + } + assert_eq!(shell.wl_storage.storage.block.epoch.0, default_past_epochs); + assert_eq!(current_epoch.0, default_past_epochs); + + check_is_data( + &shell.wl_storage, + Epoch(0), + Epoch(params.pipeline_len + default_past_epochs), + ); + + // Advance one more epoch, which should purge the data for epoch 0 in + // everything except the consensus validator set + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + assert_eq!(current_epoch.0, default_past_epochs + 1); + + check_is_data( + &shell.wl_storage, + Epoch(1), + Epoch(params.pipeline_len + default_past_epochs + 1), + ); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + validator_positions + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + all_validator_addresses + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + + // Advance to the epoch `consensus_val_set_len` + 1 + loop { + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + if current_epoch.0 == consensus_val_set_len + 1 { + 
break; + } + } + + assert!( + consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + + // Advance one more epoch + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + for ep in Epoch::default().iter_range(2) { + assert!( + consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); + } + for ep in Epoch::iter_bounds_inclusive( + Epoch(2), + current_epoch + params.pipeline_len, + ) { + assert!( + !consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); + } + + Ok(()) + } + fn get_default_true_votes(storage: &S, epoch: Epoch) -> Vec where S: StorageRead, diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..d247d3872d 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -267,10 +267,8 @@ impl BenchShell { } pub fn advance_epoch(&mut self) { - let pipeline_len = - proof_of_stake::read_pos_params(&self.inner.wl_storage) - .unwrap() - .pipeline_len; + let params = + proof_of_stake::read_pos_params(&self.inner.wl_storage).unwrap(); self.wl_storage.storage.block.epoch = self.wl_storage.storage.block.epoch.next(); @@ -278,8 +276,9 @@ impl BenchShell { proof_of_stake::copy_validator_sets_and_positions( &mut self.wl_storage, + ¶ms, current_epoch, - current_epoch + pipeline_len, + current_epoch + params.pipeline_len, ) .unwrap(); } diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 25dcbcb7f3..553eda01ef 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -26,7 +26,7 @@ pub const LAST_UPDATE_SUB_KEY: &str = "last_update"; pub const OLDEST_EPOCH_SUB_KEY: &str = "oldest_epoch"; /// Default number of past epochs to keep. -const DEFAULT_NUM_PAST_EPOCHS: u64 = 2; +pub const DEFAULT_NUM_PAST_EPOCHS: u64 = 2; /// Discrete epoched data handle pub struct Epoched { @@ -168,7 +168,7 @@ where /// kept is dropped. 
If the oldest stored epoch is not already /// associated with some value, the latest value from the dropped /// values, if any, is associated with it. - fn update_data( + pub fn update_data( &self, storage: &mut S, params: &PosParams, @@ -335,7 +335,8 @@ where S: StorageWrite + StorageRead, { let key = self.get_last_update_storage_key(); - storage.write(&key, epoch) + storage.write(&key, epoch)?; + self.set_oldest_epoch(storage, epoch) } fn get_last_update_storage_key(&self) -> storage::Key { @@ -368,6 +369,109 @@ where let key = self.get_last_update_storage_key(); storage.write(&key, current_epoch) } + + fn get_oldest_epoch_storage_key(&self) -> storage::Key { + self.storage_prefix + .push(&OLDEST_EPOCH_SUB_KEY.to_owned()) + .unwrap() + } + + fn get_oldest_epoch( + &self, + storage: &S, + ) -> storage_api::Result> + where + S: StorageRead, + { + let key = self.get_oldest_epoch_storage_key(); + storage.read(&key) + } + + fn set_oldest_epoch( + &self, + storage: &mut S, + new_oldest_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let key = self.get_oldest_epoch_storage_key(); + storage.write(&key, new_oldest_epoch) + } + + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + Epoch( + epoch + .0 + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default(), + ) + } + + /// Update data by removing old epochs + /// TODO: should we consider more complex handling of empty epochs in the + /// data below? 
+ pub fn update_data( + &self, + storage: &mut S, + params: &PosParams, + current_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let last_update = self.get_last_update(storage)?; + let oldest_epoch = self.get_oldest_epoch(storage)?; + println!( + "\nLast update = {:?}\nOldest epoch = {:?}\n", + last_update, oldest_epoch + ); + if let (Some(last_update), Some(oldest_epoch)) = + (last_update, oldest_epoch) + { + let oldest_to_keep = current_epoch + .0 + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default(); + if oldest_epoch.0 < oldest_to_keep { + let diff = oldest_to_keep - oldest_epoch.0; + // Go through the epochs before the expected oldest epoch and + // keep the latest one + tracing::debug!( + "Trimming nested epoched data in epoch {current_epoch}, \ + last updated at {last_update}." + ); + let data_handler = self.get_data_handler(); + // Remove data before the new oldest epoch, keep the latest + // value + dbg!(&diff); + for epoch in oldest_epoch.iter_range(diff) { + let was_data = data_handler.remove_all(storage, &epoch)?; + if was_data { + tracing::debug!( + "Removed inner map data at epoch {epoch}" + ); + } else { + tracing::debug!("WARNING: was no data in {epoch}"); + } + } + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); + + // if !data_handler.contains(storage, &new_oldest_epoch)? 
{ + // panic!("WARNING: no data existing in + // {new_oldest_epoch}"); } + self.set_oldest_epoch(storage, new_oldest_epoch)?; + + // Update the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; + return Ok(()); + } + } + + Ok(()) + } } impl diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 3645a84d05..d8af48a1ff 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -85,10 +85,6 @@ pub fn staking_token_address(storage: &impl StorageRead) -> Address { .expect("Must be able to read native token address") } -/// Number of epochs below the current epoch for which full validator sets are -/// stored -const STORE_VALIDATOR_SETS_LEN: u64 = 2; - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum GenesisError { @@ -505,7 +501,12 @@ where credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; // Copy the genesis validator set into the pipeline epoch as well for epoch in (current_epoch.next()).iter_range(params.pipeline_len) { - copy_validator_sets_and_positions(storage, current_epoch, epoch)?; + copy_validator_sets_and_positions( + storage, + ¶ms, + current_epoch, + epoch, + )?; } tracing::debug!("Genesis initialized"); @@ -1554,6 +1555,7 @@ where /// Validator sets and positions copying into a future epoch pub fn copy_validator_sets_and_positions( storage: &mut S, + params: &PosParams, current_epoch: Epoch, target_epoch: Epoch, ) -> storage_api::Result<()> @@ -1623,6 +1625,9 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } + // Purge consensus and below-capacity validator sets + consensus_validator_set.update_data(storage, params, current_epoch)?; + below_capacity_validator_set.update_data(storage, params, current_epoch)?; // Copy validator positions let mut positions = HashMap::::default(); @@ -1641,6 +1646,13 @@ where } validator_set_positions_handle.set_last_update(storage, current_epoch)?; + // Purge old 
epochs of validator positions + validator_set_positions_handle.update_data( + storage, + params, + current_epoch, + )?; + // Copy set of all validator addresses let mut all_validators = HashSet::
::default(); let validator_addresses_handle = validator_addresses_handle(); @@ -1656,6 +1668,9 @@ where debug_assert!(!was_in); } + // Purge old epochs of all validator addresses + validator_addresses_handle.update_data(storage, params, current_epoch)?; + Ok(()) } @@ -1700,27 +1715,6 @@ where total_consensus_stake_key_handle().set(storage, total, epoch, 0) } -/// Purge the validator sets from the epochs older than the current epoch minus -/// `STORE_VALIDATOR_SETS_LEN` -pub fn purge_validator_sets_for_old_epoch( - storage: &mut S, - epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - if Epoch(STORE_VALIDATOR_SETS_LEN) < epoch { - let old_epoch = epoch - STORE_VALIDATOR_SETS_LEN - 1; - consensus_validator_set_handle() - .get_data_handler() - .remove_all(storage, &old_epoch)?; - below_capacity_validator_set_handle() - .get_data_handler() - .remove_all(storage, &old_epoch)?; - } - Ok(()) -} - /// Read the position of the validator in the subset of validators that have the /// same bonded stake. This information is held in its own epoched structure in /// addition to being inside the validator sets. 
diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index 4465bdc687..2319319d54 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -29,6 +29,7 @@ use proptest::test_runner::Config; // `tracing` logs from tests use test_log::test; +use crate::epoched::DEFAULT_NUM_PAST_EPOCHS; use crate::parameters::testing::arb_pos_params; use crate::parameters::{OwnedPosParams, PosParams}; use crate::types::{ @@ -42,7 +43,7 @@ use crate::{ bond_tokens, bonds_and_unbonds, consensus_validator_set_handle, copy_validator_sets_and_positions, find_validator_by_raw_hash, get_num_consensus_validators, insert_validator_into_validator_set, - is_validator, process_slashes, purge_validator_sets_for_old_epoch, + is_validator, process_slashes, read_below_capacity_validator_set_addresses_with_stake, read_below_threshold_validator_set_addresses, read_consensus_validator_set_addresses_with_stake, read_total_stake, @@ -53,7 +54,7 @@ use crate::{ validator_consensus_key_handle, validator_set_positions_handle, validator_set_update_tendermint, validator_slashes_handle, validator_state_handle, withdraw_tokens, write_validator_address_raw_hash, - BecomeValidator, STORE_VALIDATOR_SETS_LEN, + BecomeValidator, }; proptest! { @@ -1780,7 +1781,7 @@ fn test_validator_sets() { for e in Epoch::iter_bounds_inclusive( start_epoch, last_epoch - .sub_or_default(Epoch(STORE_VALIDATOR_SETS_LEN)) + .sub_or_default(Epoch(DEFAULT_NUM_PAST_EPOCHS)) .sub_or_default(Epoch(1)), ) { assert!( @@ -2047,17 +2048,18 @@ fn get_tendermint_set_updates( } /// Advance to the next epoch. Returns the new epoch. 
-fn advance_epoch(s: &mut TestWlStorage, params: &OwnedPosParams) -> Epoch { +fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { s.storage.block.epoch = s.storage.block.epoch.next(); let current_epoch = s.storage.block.epoch; store_total_consensus_stake(s, current_epoch).unwrap(); copy_validator_sets_and_positions( s, + params, current_epoch, current_epoch + params.pipeline_len, ) .unwrap(); - purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); + // purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); // process_slashes(s, current_epoch).unwrap(); // dbg!(current_epoch); current_epoch From 85bf4aa5f1271b18ba00b06cd740d364399228cf Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 27 Sep 2023 18:21:59 -0600 Subject: [PATCH 024/161] docstring cleanup --- proof_of_stake/src/epoched.rs | 2 +- proof_of_stake/src/types.rs | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 553eda01ef..d608f6a98c 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -325,7 +325,7 @@ where NestedMap::open(key) } - /// Initialize new nested data at the given epoch offset. + /// Initialize new nested data at the given epoch. pub fn init( &self, storage: &mut S, diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 88881aae84..8807b8072e 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -24,7 +24,8 @@ pub use rev_order::ReverseOrdTokenAmount; use crate::parameters::PosParams; -// TODO: add this to the spec +// TODO: review the offsets for each epoched type!! + /// Stored positions of validators in validator sets pub type ValidatorSetPositions = crate::epoched::NestedEpoched< LazyMap, @@ -32,8 +33,6 @@ pub type ValidatorSetPositions = crate::epoched::NestedEpoched< crate::epoched::OffsetDefaultNumPastEpochs, >; -// TODO: check the offsets for each epoched type!! 
- /// Epoched validator's consensus key. pub type ValidatorConsensusKeys = crate::epoched::Epoched< common::PublicKey, @@ -87,7 +86,7 @@ pub type BelowCapacityValidatorSets = crate::epoched::NestedEpoched< crate::epoched::OffsetDefaultNumPastEpochs, >; -/// Epoched total consensus validator stake +/// Epoched total consensus validator set stake pub type TotalConsensusStakes = crate::epoched::Epoched< Amount, crate::epoched::OffsetZero, @@ -123,7 +122,7 @@ pub type Bonds = crate::epoched::EpochedDelta< >; /// An epoched lazy set of all known active validator addresses (consensus, -/// below-capacity, jailed) +/// below-capacity, below-threshold, jailed) pub type ValidatorAddresses = crate::epoched::NestedEpoched< LazySet
, crate::epoched::OffsetPipelineLen, From 6364cc09af7fed0ba563e7bb263120a4f7098e72 Mon Sep 17 00:00:00 2001 From: Tomas Zemanovic Date: Thu, 28 Sep 2023 14:16:13 +0200 Subject: [PATCH 025/161] Apply suggestions from code review Co-authored-by: Tiago Carvalho --- core/src/ledger/storage_api/governance.rs | 2 +- proof_of_stake/src/epoched.rs | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/core/src/ledger/storage_api/governance.rs b/core/src/ledger/storage_api/governance.rs index e5b27a5d02..ab4ad27b0b 100644 --- a/core/src/ledger/storage_api/governance.rs +++ b/core/src/ledger/storage_api/governance.rs @@ -264,6 +264,6 @@ where { let key = governance_keys::get_max_proposal_period_key(); let max_proposal_period: u64 = - storage.read(&key)?.expect("Parameter should be definied."); + storage.read(&key)?.expect("Parameter should be defined."); Ok(max_proposal_period) } diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index d608f6a98c..8b8f4f6fdd 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -409,8 +409,8 @@ where } /// Update data by removing old epochs - /// TODO: should we consider more complex handling of empty epochs in the - /// data below? + // TODO: should we consider more complex handling of empty epochs in the + // data below? pub fn update_data( &self, storage: &mut S, @@ -422,10 +422,6 @@ where { let last_update = self.get_last_update(storage)?; let oldest_epoch = self.get_oldest_epoch(storage)?; - println!( - "\nLast update = {:?}\nOldest epoch = {:?}\n", - last_update, oldest_epoch - ); if let (Some(last_update), Some(oldest_epoch)) = (last_update, oldest_epoch) { From 1ddd82b87da365e9b1b4a6a19282e1e70b2541ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:24:44 +0100 Subject: [PATCH 026/161] fixup! 
Apply suggestions from code review --- proof_of_stake/src/epoched.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 8b8f4f6fdd..5622ee6aa4 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -440,7 +440,6 @@ where let data_handler = self.get_data_handler(); // Remove data before the new oldest epoch, keep the latest // value - dbg!(&diff); for epoch in oldest_epoch.iter_range(diff) { let was_data = data_handler.remove_all(storage, &epoch)?; if was_data { From f1bc264dbaf75f6b1ee7def83f97c7579772cc49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:31:14 +0100 Subject: [PATCH 027/161] fix PoS crate standalone build --- proof_of_stake/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index d8af48a1ff..96160bd486 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -4057,7 +4057,7 @@ where /// Init PoS genesis wrapper helper that also initializes gov params that are /// used in PoS with default values. 
-#[cfg(feature = "testing")] +#[cfg(any(test, feature = "testing"))] pub fn test_init_genesis( storage: &mut S, owned: OwnedPosParams, From f40bd267207e3c0166a6e3549fe7916f6cff6955 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:47:26 +0100 Subject: [PATCH 028/161] replace direct storage read of PosParams with an RPC fn --- apps/src/lib/client/tx.rs | 8 +------- tests/src/e2e/ibc_tests.rs | 8 +++----- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..b7c2bd8fc6 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -12,8 +12,6 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; -use namada::ledger::pos; -use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; use namada::sdk::rpc::{TxBroadcastData, TxResponse}; use namada::sdk::wallet::{Wallet, WalletUtils}; @@ -550,11 +548,7 @@ where ) .unwrap(); - let key = pos::params_key(); - let pos_params = - rpc::query_storage_value::(client, &key) - .await - .expect("Pos parameter should be defined."); + let pos_params = rpc::query_pos_parameters(client).await; display_line!(IO, ""); display_line!( diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index c294367f1c..6097482245 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -64,7 +64,7 @@ use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key}; use namada::types::token::Amount; use namada_apps::client::rpc::{ - query_storage_value, query_storage_value_bytes, + query_pos_parameters, query_storage_value, query_storage_value_bytes, }; use namada_apps::client::utils::id_from_pk; use namada_apps::config::ethereum_bridge; @@ -250,10 +250,8 @@ fn make_client_state(test: &Test, height: Height) -> 
TmClientState { let client = HttpClient::new(ledger_address).unwrap(); let key = pos::params_key(); - let pos_params = test - .async_runtime() - .block_on(query_storage_value::(&client, &key)) - .unwrap(); + let pos_params = + test.async_runtime().block_on(query_pos_parameters(&client)); let pipeline_len = pos_params.pipeline_len; let key = param_storage::get_epoch_duration_storage_key(); From 41d3838079d9d8e7c1ef36dd4a16c94575d3a8e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:56:29 +0100 Subject: [PATCH 029/161] fixup! replace direct storage read of PosParams with an RPC fn --- tests/src/e2e/ibc_tests.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 6097482245..f3ab55d83e 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -53,7 +53,6 @@ use namada::ibc_proto::google::protobuf::Any; use namada::ledger::events::EventType; use namada::ledger::ibc::storage::*; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::{self, PosParams}; use namada::ledger::queries::RPC; use namada::ledger::storage::ics23_specs::ibc_proof_specs; use namada::ledger::storage::traits::Sha256Hasher; @@ -249,7 +248,6 @@ fn make_client_state(test: &Test, height: Height) -> TmClientState { let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); - let key = pos::params_key(); let pos_params = test.async_runtime().block_on(query_pos_parameters(&client)); let pipeline_len = pos_params.pipeline_len; From 620e4e26141a3e5b67da4ed53a8986f08a77034b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:54:40 +0100 Subject: [PATCH 030/161] core/types/storage:m ore flexible fn checked_sub param --- core/src/types/storage.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/core/src/types/storage.rs b/core/src/types/storage.rs index ad0c14f499..9437a1c8bd 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -1035,11 +1035,12 @@ impl Epoch { /// overflow occurred. #[must_use = "this returns the result of the operation, without modifying \ the original"] - pub fn checked_sub(self, rhs: Epoch) -> Option { - if rhs.0 > self.0 { + pub fn checked_sub(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + if rhs > self.0 { None } else { - Some(Self(self.0 - rhs.0)) + Some(Self(self.0 - rhs)) } } From 856130ef8d603f76d8fc76906ebdc4c47eebc687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 28 Sep 2023 13:56:35 +0100 Subject: [PATCH 031/161] PoS: refactor usages of `Epoch::checked_sub` --- proof_of_stake/src/epoched.rs | 42 +++++++++++++---------------------- proof_of_stake/src/lib.rs | 12 +++++----- 2 files changed, 21 insertions(+), 33 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 5622ee6aa4..167d4eb862 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -183,11 +183,10 @@ where (last_update, oldest_epoch) { let oldest_to_keep = current_epoch - .0 .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); - if oldest_epoch.0 < oldest_to_keep { - let diff = oldest_to_keep - oldest_epoch.0; + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); // Go through the epochs before the expected oldest epoch and // keep the latest one tracing::debug!( @@ -266,12 +265,9 @@ where } fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { - Epoch( - epoch - .0 - .checked_sub(PastEpochs::value(params)) - .unwrap_or_default(), - ) + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } fn get_oldest_epoch_storage_key(&self) -> storage::Key { @@ -400,12 +396,9 @@ where } fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { - Epoch( 
- epoch - .0 - .checked_sub(PastEpochs::value(params)) - .unwrap_or_default(), - ) + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } /// Update data by removing old epochs @@ -426,11 +419,10 @@ where (last_update, oldest_epoch) { let oldest_to_keep = current_epoch - .0 .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); - if oldest_epoch.0 < oldest_to_keep { - let diff = oldest_to_keep - oldest_epoch.0; + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); // Go through the epochs before the expected oldest epoch and // keep the latest one tracing::debug!( @@ -610,11 +602,10 @@ where (last_update, oldest_epoch) { let oldest_to_keep = current_epoch - .0 .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); - if oldest_epoch.0 < oldest_to_keep { - let diff = oldest_to_keep - oldest_epoch.0; + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); // Go through the epochs before the expected oldest epoch and // sum them into it tracing::debug!( @@ -713,12 +704,9 @@ where } fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { - Epoch( - epoch - .0 - .checked_sub(PastEpochs::value(params)) - .unwrap_or_default(), - ) + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } fn get_oldest_epoch_storage_key(&self) -> storage::Key { diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 96160bd486..6f39fc6527 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -3215,10 +3215,10 @@ fn make_unbond_details( if slash.epoch >= start && slash.epoch < withdraw - .checked_sub(Epoch( + .checked_sub( params.unbonding_len + params.cubic_slashing_window_length, - )) + ) .unwrap_or_default() { let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); @@ -3751,10 +3751,10 @@ where start, Some( infraction_epoch - .checked_sub(Epoch( + .checked_sub( params.unbonding_len + 
params.cubic_slashing_window_length, - )) + ) .unwrap_or_default(), ), &validator, @@ -3813,10 +3813,10 @@ where start, Some( infraction_epoch - .checked_sub(Epoch( + .checked_sub( params.unbonding_len + params.cubic_slashing_window_length, - )) + ) .unwrap_or_default(), ), &validator, From 20180c4805dd670da9937c01c9bacf933d75a367 Mon Sep 17 00:00:00 2001 From: brentstone Date: Thu, 28 Sep 2023 09:27:08 -0600 Subject: [PATCH 032/161] changelog: add #1944 --- .../unreleased/improvements/1944-tune-storage-past-epochs.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1944-tune-storage-past-epochs.md diff --git a/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md b/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md new file mode 100644 index 0000000000..9ddda02759 --- /dev/null +++ b/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md @@ -0,0 +1,2 @@ +- New implementation and parameters for purging old epochs for Epoched validator + data in storage. 
([\#1944](https://github.com/anoma/namada/pull/1944)) \ No newline at end of file From 3fa8d388cf4f21bce4c5aaa796ac40024170e973 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 28 Sep 2023 16:07:44 +0000 Subject: [PATCH 033/161] [ci] wasm checksums update --- wasm/checksums.json | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 8ebfc20f2b..b628a75ae4 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,22 +1,22 @@ { - "tx_bond.wasm": "tx_bond.126ab38555f9dfbfb689f9774a7a028f7ffc226bd43fef118a3f32261c164fea.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.6f6ad3b95e21072af9e854e374fa0d7f691f0743da8cf52a643ed1bdb0e16611.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.a75d583d6ccbd956ef9c9e85010bba75b4bf63a15a67b55dfb159be35cb5c142.wasm", + "tx_bond.wasm": "tx_bond.e68381be69fdbd323507f88b8a1e67537e1c883edabbcb17a77f827748f99d11.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.1b7d7a458a365b627f297a8598ab0405c5092f94d3aa6b43d157e91494a8db6d.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.5d34345991a3c3461a55a7a7c801b56944b14a3f0055d2913852c6ab65e4d604.wasm", "tx_ibc.wasm": "tx_ibc.54313469bcc9bcaabf661177f88cb90ac9008f542edbf686f286a02f8cdbfd41.wasm", - "tx_init_account.wasm": "tx_init_account.10ee01dac5325685360119ba8e4b597d776a018ea4c9ac3534dd876ec377789e.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.04cad5a3a71f833a5867bca3ced54b06d34ad07f3f21877599d38581d362ba10.wasm", - "tx_init_validator.wasm": "tx_init_validator.964c9449ffe0fc41649decd283c905c7cd3665127274444fafc6f1347364a61e.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.b5d92c1bd196be0d196ef16e2ceed9a9ced7ac61d7b177fdbad208c0e784e172.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.32011ddc5316705ae005059d5916b071288a04fb4dee80854af16d61548b5c27.wasm", - "tx_transfer.wasm": 
"tx_transfer.963ec4c2705377423ddc46b4ff3de63f9b625351467d89290fa771a485710c41.wasm", - "tx_unbond.wasm": "tx_unbond.63baa912938704817d2b23b4f889ffa0f40ea38ca1ce1990ed08999a717998d2.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.03bcdf9d8f4ff06b87e9eb6207b709b8b99fac4035737dbfc70881fc5810e0c0.wasm", - "tx_update_account.wasm": "tx_update_account.7b4e225a823449d3d8bffde197c439ad24f4f6c95cf754acf62b6373958c4486.wasm", - "tx_update_steward_commission.wasm": "tx_update_steward_commission.0001b21ef3ef4f9b33afb5a5ef75a6a5427fbe221a8350cfbd81781ac18ded6e.wasm", + "tx_init_account.wasm": "tx_init_account.ee302c3711231a402164cb49f8828242f5be7863315fae35e2575f6009dd94d2.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.26d8b4ba0089faa2fbce56e5d088292b801dbbc0d4dcb7e203b3fec7a19b28f6.wasm", + "tx_init_validator.wasm": "tx_init_validator.ad7a059eacc883ebafb612d628a1e33229d2a3e5cda21d48da4263ae4ee437b0.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.f6a526f45dab93076be0d7bdeb90b58183c13925907974093eaaecee2127b5e4.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.b5f6b17103aceb439eaf1a2d79b9bffc47e4175504363ade652e39ea850012be.wasm", + "tx_transfer.wasm": "tx_transfer.9995453c1f8144a7457e7af4c21bc5f358ead953dd6d0cf5fa7ea75365303ce2.wasm", + "tx_unbond.wasm": "tx_unbond.2330b177c14d95a6a92a6a6d0702c447a16d6789d8ce8c4b4889a0818b1e477a.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.1e27f103e0ea615cea9f86710bc2c56304516b42d3337e516b97d7e03982aaa1.wasm", + "tx_update_account.wasm": "tx_update_account.602610e10d70faa7c9e65f98de161669eb7ce5fc366b96cc6fb4b17abef226c7.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.c049d39194fda97c7cd579dfc818ad282a6cb41e65bd4f376a2e61440cebf580.wasm", "tx_vote_proposal.wasm": "tx_vote_proposal.727e36112fcd0753f758370dff981cc93430fe7d6f95ceb570a02a37529a7531.wasm", - "tx_withdraw.wasm": "tx_withdraw.311993b9362f1a66acf002f15fbd9599115e41d9c5b4e1b0fa565335fae147cd.wasm", + "tx_withdraw.wasm": 
"tx_withdraw.be5b04c3e76850c697a14b4d92c6eeb7f3e20f16b865fecf71572df053589509.wasm", "vp_implicit.wasm": "vp_implicit.e0958c2ec06863f7bd48cd9abb67cc7557f956ce9fa6c714deba885db721fa50.wasm", "vp_masp.wasm": "vp_masp.037671b60b3e9f312c1c5fdc53d040ebfad21a646b9b1e2dac6b3e20fc0d01ec.wasm", "vp_user.wasm": "vp_user.0203fddde57bc31ef411370b628963486928a7c4d34614980d1a52616e0f617b.wasm", "vp_validator.wasm": "vp_validator.39c685bc1407ef484f963aff9f7576273d56bbf283dcbded9f01944cf7ff9bf0.wasm" -} \ No newline at end of file +} From c65a6d0a51af791797c2e4833853af6f3462f899 Mon Sep 17 00:00:00 2001 From: brentstone Date: Thu, 28 Sep 2023 10:31:46 -0600 Subject: [PATCH 034/161] fixed `epoched` tests and `test_validator_sets` --- proof_of_stake/src/epoched.rs | 2 ++ proof_of_stake/src/tests.rs | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 167d4eb862..8b86f4dbd4 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -1338,6 +1338,8 @@ mod test { fn init_storage() -> storage_api::Result { let mut s = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut s)?; crate::init_genesis( &mut s, &PosParams::default(), diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index 2319319d54..045fc383da 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -1776,7 +1776,8 @@ fn test_validator_sets() { ); assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk4)); - // Check that the validator sets were purged for the old epochs + // Check that the below-capacity validator set was purged for the old epochs + // but that the consensus_validator_set was not let last_epoch = epoch; for e in Epoch::iter_bounds_inclusive( start_epoch, @@ -1785,7 +1786,7 @@ fn test_validator_sets() { .sub_or_default(Epoch(1)), ) { assert!( - 
consensus_validator_set_handle() + !consensus_validator_set_handle() .at(&e) .is_empty(&s) .unwrap() From 99714977f8ee6898ba9321ea97c847957235fb06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 29 Sep 2023 10:09:02 +0100 Subject: [PATCH 035/161] fixup! pos/types: configure number of past epochs kept for PoS data --- proof_of_stake/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 8807b8072e..d3ac47ac29 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -140,7 +140,7 @@ pub type ValidatorSlashes = NestedMap; pub type EpochedSlashes = crate::epoched::NestedEpoched< ValidatorSlashes, crate::epoched::OffsetUnbondingLen, - crate::epoched::OffsetSlashProcessingLen, /* TODO: should this be slash procesing + cubic window? */ + crate::epoched::OffsetSlashProcessingLen, >; /// Epoched validator's unbonds From fe6c3eb53bf972b204110d3cd34db2719f96bd25 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Tue, 19 Sep 2023 08:36:26 +0200 Subject: [PATCH 036/161] Combined construction of signing data with transaction construction. 
--- apps/src/lib/client/tx.rs | 314 ++++++++------------------------------ shared/src/sdk/tx.rs | 278 ++++++++++++++++++++++++--------- 2 files changed, 270 insertions(+), 322 deletions(-) diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..e00583e72a 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -146,25 +146,13 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_signer = Some(args.owner.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.owner.clone()), - default_signer, - ) - .await?; + submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &args.owner).await?; - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &args.owner) - .await?; - - let (mut tx, _epoch) = tx::build_custom::<_, _, _, IO>( + let (mut tx, signing_data, _epoch) = tx::build_custom::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - &signing_data.fee_payer, ) .await?; @@ -191,22 +179,11 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_signer = Some(args.addr.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.addr.clone()), - default_signer, - ) - .await?; - - let (mut tx, _epoch) = tx::build_update_account::<_, _, _, IO>( + let (mut tx, signing_data, _epoch) = tx::build_update_account::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - signing_data.fee_payer.clone(), ) .await?; @@ -233,21 +210,11 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - None, - None, - ) - .await?; - - let (mut tx, _epoch) = tx::build_init_account::<_, _, _, IO>( + let (mut tx, signing_data, _epoch) = tx::build_init_account::<_, _, _, IO>( client, &mut ctx.wallet, &mut 
ctx.shielded, args.clone(), - &signing_data.fee_payer, ) .await?; @@ -714,16 +681,6 @@ pub async fn submit_transfer< args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { - let default_signer = Some(args.source.effective_address()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.source.effective_address()), - default_signer, - ) - .await?; - submit_reveal_aux::<_, IO>( client, &mut ctx, @@ -732,13 +689,11 @@ pub async fn submit_transfer< ) .await?; - let arg = args.clone(); - let (mut tx, tx_epoch) = tx::build_transfer::<_, _, _, IO>( + let (mut tx, signing_data, tx_epoch) = tx::build_transfer::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, - arg, - signing_data.fee_payer.clone(), + args.clone(), ) .await?; signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) @@ -795,25 +750,13 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_signer = Some(args.source.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.source.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>(client, &mut ctx, args.tx.clone(), &args.source) - .await?; + submit_reveal_aux::<_, IO>(client, &mut ctx, args.tx.clone(), &args.source).await?; - let (mut tx, _epoch) = tx::build_ibc_transfer::<_, _, _, IO>( + let (mut tx, signing_data, _epoch) = tx::build_ibc_transfer::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - signing_data.fee_payer.clone(), ) .await?; signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) @@ -842,7 +785,7 @@ where let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; let governance_parameters = rpc::query_governance_parameters(client).await; - let ((mut tx_builder, _fee_unshield_epoch), signing_data) = if args + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args .is_offline { let 
proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) @@ -889,16 +832,6 @@ where .validate(&governance_parameters, current_epoch, args.tx.force) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - default_signer, - ) - .await?; - submit_reveal_aux::<_, IO>( client, &mut ctx, @@ -907,18 +840,14 @@ where ) .await?; - ( - tx::build_pgf_funding_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - &signing_data.fee_payer.clone(), - ) - .await?, - signing_data, + tx::build_pgf_funding_proposal::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + proposal, ) + .await? } else if args.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( args.proposal_data.as_ref(), @@ -941,16 +870,6 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - default_signer, - ) - .await?; - submit_reveal_aux::<_, IO>( client, &mut ctx, @@ -959,18 +878,14 @@ where ) .await?; - ( - tx::build_pgf_stewards_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - signing_data.fee_payer.clone(), - ) - .await?, - signing_data, + tx::build_pgf_stewards_proposal::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + proposal, ) + .await? 
} else { let proposal = DefaultProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -991,16 +906,6 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - default_signer, - ) - .await?; - submit_reveal_aux::<_, IO>( client, &mut ctx, @@ -1009,18 +914,14 @@ where ) .await?; - ( - tx::build_default_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - signing_data.fee_payer.clone(), - ) - .await?, - signing_data, + tx::build_default_proposal::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + proposal, ) + .await? }; signing::generate_test_vector::<_, _, IO>( client, @@ -1059,19 +960,17 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; - - let default_signer = Some(args.voter.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.voter.clone()), - default_signer.clone(), - ) - .await?; - - let (mut tx_builder, _fee_unshield_epoch) = if args.is_offline { + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { + let default_signer = Some(args.voter.clone()); + let signing_data = aux_signing_data::<_, IO>( + client, + &mut ctx.wallet, + &args.tx, + Some(args.voter.clone()), + default_signer.clone(), + ) + .await?; + let proposal_vote = ProposalVote::try_from(args.vote) .map_err(|_| error::TxError::InvalidProposalVote)?; @@ -1113,15 +1012,15 @@ where display_line!(IO, "Proposal vote serialized to: {}", output_file_path); return Ok(()); } else { + let current_epoch = rpc::query_and_print_epoch::(client).await; tx::build_vote_proposal::<_, _, _, IO>( client, &mut ctx.wallet, &mut 
ctx.shielded, args.clone(), current_epoch, - signing_data.fee_payer.clone(), ) - .await? + .await? }; signing::generate_test_vector::<_, _, IO>( client, @@ -1269,25 +1168,13 @@ where C::Error: std::fmt::Display, { let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &default_address) - .await?; + submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &default_address).await?; - let (mut tx, _fee_unshield_epoch) = tx::build_bond::<_, _, _, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_bond::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - signing_data.fee_payer.clone(), ) .await?; signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) @@ -1297,9 +1184,8 @@ where tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx).await?; } Ok(()) @@ -1314,24 +1200,12 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch, latest_withdrawal_pre) = + let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = tx::build_unbond::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - signing_data.fee_payer.clone(), ) .await?; 
signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) @@ -1361,23 +1235,11 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch) = tx::build_withdraw::<_, _, _, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_withdraw::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, args.clone(), - signing_data.fee_payer.clone(), ) .await?; signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) @@ -1403,27 +1265,14 @@ pub async fn submit_validator_commission_change( where C: namada::ledger::queries::Client + Sync, { - let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data::<_, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_validator_commission_change::<_, _, _, IO>( client, &mut ctx.wallet, - &args.tx, - Some(args.validator.clone()), - default_signer, + &mut ctx.shielded, + args.clone(), ) .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_validator_commission_change::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); @@ -1448,27 +1297,14 @@ pub async fn submit_unjail_validator< where C::Error: std::fmt::Display, { - let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data::<_, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_unjail_validator::<_, _, _, IO>( client, &mut 
ctx.wallet, - &args.tx, - Some(args.validator.clone()), - default_signer, + &mut ctx.shielded, + args.clone(), ) .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_unjail_validator::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); @@ -1494,26 +1330,14 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_update_steward_commission::<_, _, _, IO>( client, &mut ctx.wallet, - &args.tx, - Some(args.steward.clone()), - default_signer, + &mut ctx.shielded, + args.clone(), ) .await?; - let (mut tx, _fee_unshield_epoch) = - tx::build_update_steward_commission::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) .await?; @@ -1537,26 +1361,14 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( + let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_resign_steward::<_, _, _, IO>( client, &mut ctx.wallet, - &args.tx, - Some(args.steward.clone()), - default_signer, + &mut ctx.shielded, + args.clone(), ) .await?; - let (mut tx, _fee_unshield_epoch) = - tx::build_resign_steward::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) .await?; diff --git 
a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..5885ebda44 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -42,6 +42,7 @@ use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; use crate::ledger::ibc::storage::ibc_denom_key; +use crate::sdk::signing::SigningTxData; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; @@ -530,8 +531,17 @@ pub async fn build_validator_commission_change< rate, tx_code_path, }: args::CommissionRateChange, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(validator.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(validator.clone()), + default_signer, + ) + .await?; + let epoch = rpc::query_epoch(client).await?; let params: PosParams = rpc::get_pos_params(client).await?; @@ -605,10 +615,10 @@ pub async fn build_validator_commission_change< tx_code_path, data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to update a steward commission @@ -627,8 +637,17 @@ pub async fn build_update_steward_commission< commission, tx_code_path, }: args::UpdateStewardCommission, - gas_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(steward.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(steward.clone()), + default_signer, + ) + .await?; + if !rpc::is_steward(client, &steward).await && !tx_args.force { edisplay_line!(IO, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); @@ -660,10 +679,10 @@ pub async fn 
build_update_steward_commission< tx_code_path, data, do_nothing, - gas_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to resign as a steward @@ -681,8 +700,17 @@ pub async fn build_resign_steward< steward, tx_code_path, }: args::ResignSteward, - gas_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(steward.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(steward.clone()), + default_signer, + ) + .await?; + if !rpc::is_steward(client, &steward).await && !tx_args.force { edisplay_line!(IO, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); @@ -696,10 +724,10 @@ pub async fn build_resign_steward< tx_code_path, steward, do_nothing, - gas_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit transaction to unjail a jailed validator @@ -717,8 +745,17 @@ pub async fn build_unjail_validator< validator, tx_code_path, }: args::TxUnjailValidator, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(validator.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(validator.clone()), + default_signer, + ) + .await?; + if !rpc::is_validator(client, &validator).await? 
{ edisplay_line!( IO, @@ -803,10 +840,10 @@ pub async fn build_unjail_validator< tx_code_path, validator, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit transaction to withdraw an unbond @@ -825,8 +862,18 @@ pub async fn build_withdraw< source, tx_code_path, }: args::Withdraw, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(default_address), + default_signer, + ) + .await?; + let epoch = rpc::query_epoch(client).await?; let validator = known_validator_or_err::<_, IO>( @@ -879,10 +926,10 @@ pub async fn build_withdraw< tx_code_path, data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to unbond @@ -902,8 +949,18 @@ pub async fn build_unbond< source, tx_code_path, }: args::Unbond, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option, Option<(Epoch, token::Amount)>)> { +) -> Result<(Tx, SigningTxData, Option, Option<(Epoch, token::Amount)>)> { + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(default_address), + default_signer, + ) + .await?; + let source = source.clone(); // Check the source's current bond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); @@ -969,11 +1026,11 @@ pub async fn build_unbond< tx_code_path, data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await?; - Ok((tx, epoch, latest_withdrawal_pre)) + Ok((tx, signing_data, epoch, latest_withdrawal_pre)) } /// Query the 
unbonds post-tx @@ -1062,14 +1119,21 @@ pub async fn build_bond< native_token, tx_code_path, }: args::Bond, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let validator = known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, +) -> Result<(Tx, SigningTxData, Option)> { + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( client, + wallet, + &tx_args, + Some(default_address.clone()), + default_signer, ) - .await?; + .await?; + + let validator = + known_validator_or_err::<_, IO>(validator.clone(), tx_args.force, client) + .await?; // Check that the source address exists on chain let source = match source.clone() { @@ -1115,10 +1179,10 @@ pub async fn build_bond< tx_code_path, data, do_nothing, - &fee_payer, + &signing_data.fee_payer, tx_source_balance, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a default proposal governance @@ -1141,8 +1205,17 @@ pub async fn build_default_proposal< tx_code_path, }: args::InitProposal, proposal: DefaultProposal, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx, + Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1168,10 +1241,10 @@ pub async fn build_default_proposal< tx_code_path, init_proposal_data, push_data, - &fee_payer, + &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a proposal vote @@ -1194,8 +1267,17 @@ pub async fn build_vote_proposal< tx_code_path, }: 
args::VoteProposal, epoch: Epoch, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(voter.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx, + Some(voter.clone()), + default_signer.clone(), + ) + .await?; + let proposal_vote = ProposalVote::try_from(vote) .map_err(|_| TxError::InvalidProposalVote)?; @@ -1255,10 +1337,10 @@ pub async fn build_vote_proposal< tx_code_path, data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance @@ -1281,8 +1363,17 @@ pub async fn build_pgf_funding_proposal< tx_code_path, }: args::InitProposal, proposal: PgfFundingProposal, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx, + Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1300,10 +1391,10 @@ pub async fn build_pgf_funding_proposal< tx_code_path, init_proposal_data, add_section, - fee_payer, + &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance @@ -1326,8 +1417,17 @@ pub async fn build_pgf_stewards_proposal< tx_code_path, }: args::InitProposal, proposal: PgfStewardProposal, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx, + 
Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1346,10 +1446,10 @@ pub async fn build_pgf_stewards_proposal< tx_code_path, init_proposal_data, add_section, - &fee_payer, + &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit an IBC transfer @@ -1363,8 +1463,16 @@ pub async fn build_ibc_transfer< wallet: &mut Wallet, shielded: &mut ShieldedContext, args: args::TxIbcTransfer, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(args.source.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &args.tx, + Some(args.source.clone()), + default_signer, + ) + .await?; // Check that the source address exists on chain let source = source_exists_or_err::<_, IO>( args.source.clone(), @@ -1486,12 +1594,12 @@ pub async fn build_ibc_transfer< shielded, &args.tx, &mut tx, - fee_payer, + signing_data.fee_payer.clone(), tx_source_balance, ) .await?; - Ok((tx, epoch)) + Ok((tx, signing_data, epoch)) } /// Abstraction for helping build transactions @@ -1663,8 +1771,17 @@ pub async fn build_transfer< wallet: &mut Wallet, shielded: &mut ShieldedContext, mut args: args::TxTransfer, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(args.source.effective_address()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &args.tx, + Some(args.source.effective_address()), + default_signer, + ) + .await?; + let source = args.source.effective_address(); let target = args.target.effective_address(); let token = args.token.clone(); @@ -1796,7 +1913,7 @@ pub async fn build_transfer< args.tx_code_path, transfer, 
add_shielded, - &fee_payer, + &signing_data.fee_payer, tx_source_balance, ) .await?; @@ -1819,7 +1936,7 @@ pub async fn build_transfer< (None, Some(_transfer_unshield_epoch)) => shielded_tx_epoch, (None, None) => None, }; - Ok((tx, masp_epoch)) + Ok((tx, signing_data, masp_epoch)) } /// Submit a transaction to initialize an account @@ -1839,10 +1956,11 @@ pub async fn build_init_account< public_keys, threshold, }: args::TxInitAccount, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - let vp_code_hash = - query_wasm_code_hash_buf::<_, IO>(client, &vp_code_path).await?; +) -> Result<(Tx, SigningTxData, Option)> { + let signing_data = + signing::aux_signing_data::<_, _, IO>(client, wallet, &tx_args, None, None).await?; + + let vp_code_hash = query_wasm_code_hash_buf::<_, IO>(client, &vp_code_path).await?; let threshold = match threshold { Some(threshold) => threshold, @@ -1875,10 +1993,10 @@ pub async fn build_init_account< tx_code_path, data, add_code_hash, - fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to update a VP @@ -1899,8 +2017,17 @@ pub async fn build_update_account< public_keys, threshold, }: args::TxUpdateAccount, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(addr.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(addr.clone()), + default_signer, + ) + .await?; + let addr = if let Some(account) = rpc::get_account_info(client, &addr).await? 
{ account.address @@ -1945,10 +2072,10 @@ pub async fn build_update_account< tx_code_path, data, add_code_hash, - &fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a custom transaction @@ -1966,10 +2093,19 @@ pub async fn build_custom< code_path, data_path, serialized_tx, - owner: _, + owner, }: args::TxCustom, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(owner.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + &tx_args, + Some(owner.clone()), + default_signer, + ) + .await?; + let mut tx = if let Some(serialized_tx) = serialized_tx { Tx::deserialize(serialized_tx.as_ref()).map_err(|_| { Error::Other("Invalid tx deserialization.".to_string()) @@ -1994,12 +2130,12 @@ pub async fn build_custom< shielded, &tx_args, &mut tx, - fee_payer.clone(), + signing_data.fee_payer.clone(), None, ) .await?; - Ok((tx, epoch)) + Ok((tx, signing_data, epoch)) } async fn expect_dry_broadcast< From 16425d50944fd5405381677e51dd1892d02dd61b Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 20 Sep 2023 13:26:07 +0200 Subject: [PATCH 037/161] Created builders and constructors for each type of transaction. 
--- apps/src/lib/cli.rs | 8 +- apps/src/lib/cli/client.rs | 54 +- apps/src/lib/client/rpc.rs | 28 +- apps/src/lib/client/tx.rs | 462 +++------- benches/lib.rs | 15 +- shared/src/ledger/eth_bridge/bridge_pool.rs | 65 +- shared/src/ledger/eth_bridge/validator_set.rs | 4 +- shared/src/ledger/mod.rs | 410 +++++++++ shared/src/sdk/args.rs | 770 +++++++++++++++- shared/src/sdk/masp.rs | 50 +- shared/src/sdk/rpc.rs | 26 +- shared/src/sdk/signing.rs | 169 ++-- shared/src/sdk/tx.rs | 870 +++++++----------- shared/src/types/io.rs | 4 +- tests/src/e2e/ledger_tests.rs | 6 +- tests/src/integration/masp.rs | 10 +- 16 files changed, 1827 insertions(+), 1124 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 135ff1e3c5..531027a102 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -15,7 +15,7 @@ pub mod wallet; use clap::{ArgGroup, ArgMatches, ColorChoice}; use color_eyre::eyre::Result; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use utils::*; pub use utils::{safe_exit, Cmd}; @@ -3525,8 +3525,8 @@ pub mod args { target, token, amount, - native_token: (), tx_code_path, + native_token: (), } } @@ -3881,8 +3881,8 @@ pub mod args { validator, amount, source, - native_token: (), tx_code_path, + native_token: (), } } @@ -5799,7 +5799,7 @@ pub fn namada_relayer_cli() -> Result { cmds::EthBridgePool::WithContext(sub_cmd), ) => { let global_args = args::Global::parse(&matches); - let context = Context::new::(global_args)?; + let context = Context::new::(global_args)?; Ok(NamadaRelayer::EthBridgePoolWithCtx(Box::new(( sub_cmd, context, )))) diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 1a7d9f534a..5af2aaa2b2 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,9 +1,11 @@ use color_eyre::eyre::{eyre, Report, Result}; -use namada::ledger::eth_bridge::bridge_pool; + use namada::sdk::tx::dump_tx; -use namada::sdk::{signing, tx as sdk_tx}; +use namada::sdk::signing; use 
namada::types::control_flow::ProceedOrElse; use namada::types::io::Io; +use namada::ledger::NamadaImpl; +use namada::ledger::Namada; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -256,58 +258,30 @@ impl CliApi { let args = args.to_sdk(&mut ctx); let tx_args = args.tx.clone(); - let default_signer = Some(args.sender.clone()); - let signing_data = tx::aux_signing_data::<_, IO>( + let mut namada = NamadaImpl::new( &client, &mut ctx.wallet, - &args.tx, - Some(args.sender.clone()), - default_signer, - ) - .await?; + &mut ctx.shielded, + ); - let (mut tx, _epoch) = - bridge_pool::build_bridge_pool_tx::<_, _, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; + let (mut tx, signing_data, _epoch) = + args.clone().build(&mut namada).await?; - signing::generate_test_vector::<_, _, IO>( - &client, - &mut ctx.wallet, - &tx, - ) - .await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { dump_tx::(&args.tx, tx); } else { - tx::submit_reveal_aux::<_, IO>( - &client, - &mut ctx, + tx::submit_reveal_aux( + &mut namada, tx_args.clone(), &args.sender, ) .await?; - signing::sign_tx( - &mut ctx.wallet, - &tx_args, - &mut tx, - signing_data, - )?; + namada.sign(&mut tx, &tx_args, signing_data)?; - sdk_tx::process_tx::<_, _, IO>( - &client, - &mut ctx.wallet, - &tx_args, - tx, - ) - .await?; + namada.submit(tx, &tx_args).await?; } } Sub::TxUnjailValidator(TxUnjailValidator(mut args)) => { diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..72d6514c35 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -173,7 +173,7 @@ pub async fn query_transfers< // Realize the rewards that would have been attained upon the // transaction's reception let amt = shielded - .compute_exchanged_amount::<_, IO>( + .compute_exchanged_amount( client, amt, epoch, @@ -224,7 +224,7 @@ pub async fn query_transfers< IO, " {}{} {}", sign, - 
format_denominated_amount::<_, IO>( + format_denominated_amount( client, asset, change.into(), @@ -252,7 +252,7 @@ pub async fn query_transfers< IO, " {}{} {}", sign, - format_denominated_amount::<_, IO>( + format_denominated_amount( client, &token_addr, val.into(), @@ -358,7 +358,7 @@ pub async fn query_transparent_balance< .await { Ok(balance) => { - let balance = format_denominated_amount::<_, IO>( + let balance = format_denominated_amount( client, &token, balance, ) .await; @@ -380,7 +380,7 @@ pub async fn query_transparent_balance< for (token_alias, token) in tokens { let balance = get_token_balance(client, &token, &owner).await; if !balance.is_zero() { - let balance = format_denominated_amount::<_, IO>( + let balance = format_denominated_amount( client, &token, balance, ) .await; @@ -518,7 +518,7 @@ pub async fn query_pinned_balance< token_alias ); } else { - let formatted = format_denominated_amount::<_, IO>( + let formatted = format_denominated_amount( client, token, total_balance.into(), @@ -552,7 +552,7 @@ pub async fn query_pinned_balance< ); found_any = true; } - let formatted = format_denominated_amount::<_, IO>( + let formatted = format_denominated_amount( client, token_addr, (*value).into(), @@ -599,7 +599,7 @@ async fn print_balances( owner.clone(), format!( ": {}, owned by {}", - format_denominated_amount::<_, IO>(client, tok, balance) + format_denominated_amount(client, tok, balance) .await, wallet.lookup_alias(owner) ), @@ -785,7 +785,7 @@ pub async fn query_shielded_balance< IO, "{}: {}", token_alias, - format_denominated_amount::<_, IO>( + format_denominated_amount( client, &token, token::Amount::from(total_balance) @@ -853,7 +853,7 @@ pub async fn query_shielded_balance< .map(|a| a.to_string()) .unwrap_or_else(|| token.to_string()); display_line!(IO, "Shielded Token {}:", alias); - let formatted = format_denominated_amount::<_, IO>( + let formatted = format_denominated_amount( client, &token, token_balance.into(), @@ -904,7 +904,7 @@ pub async 
fn query_shielded_balance< if !val.is_zero() { found_any = true; } - let formatted = format_denominated_amount::<_, IO>( + let formatted = format_denominated_amount( client, address, (*val).into(), @@ -975,7 +975,7 @@ pub async fn print_decoded_balance< IO, "{} : {}", wallet.lookup_alias(token_addr), - format_denominated_amount::<_, IO>( + format_denominated_amount( client, token_addr, (*amount).into() @@ -1009,7 +1009,7 @@ pub async fn print_decoded_balance_with_epoch< "{} | {} : {}", alias, epoch, - format_denominated_amount::<_, IO>(client, token_addr, asset_value) + format_denominated_amount(client, token_addr, asset_value) .await, ); } @@ -2195,7 +2195,7 @@ pub async fn query_wasm_code_hash< client: &C, code_path: impl AsRef, ) -> Result { - rpc::query_wasm_code_hash::<_, IO>(client, code_path).await + rpc::query_wasm_code_hash(client, code_path).await } /// Query a storage value and decode it with [`BorshDeserialize`]. diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index e00583e72a..4b2aa9b865 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -36,39 +36,33 @@ use crate::node::ledger::tendermint_node; use crate::wallet::{ gen_validator_keys, read_and_confirm_encryption_password, CliWalletUtils, }; +use namada::ledger::NamadaImpl; +use namada::ledger::Namada; +use namada::types::io::StdIo; /// Wrapper around `signing::aux_signing_data` that stores the optional /// disposable address to the wallet -pub async fn aux_signing_data< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn aux_signing_data<'a>( + context: &mut impl Namada<'a, WalletUtils = CliWalletUtils>, args: &args::Tx, owner: Option
, default_signer: Option
, ) -> Result { - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - args, - owner, - default_signer, - ) - .await?; + let signing_data = + signing::aux_signing_data(context, args, owner, default_signer) + .await?; if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { // Store the generated signing key to wallet in case of need - crate::wallet::save(wallet).map_err(|_| { + crate::wallet::save(context.wallet).map_err(|_| { error::Error::Other( "Failed to save disposable address to wallet".to_string(), ) })?; } else { display_line!( - IO, + StdIo, "Transaction dry run. The disposable address will not be \ saved to wallet." ) @@ -79,12 +73,8 @@ pub async fn aux_signing_data< } // Build a transaction to reveal the signer of the given transaction. -pub async fn submit_reveal_aux< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - ctx: &mut Context, +pub async fn submit_reveal_aux<'a>( + context: &mut impl Namada<'a>, args: args::Tx, address: &Address, ) -> Result<(), error::Error> { @@ -93,44 +83,28 @@ pub async fn submit_reveal_aux< } if let Address::Implicit(ImplicitAddress(pkh)) = address { - let key = ctx + let key = context .wallet .find_key_by_pkh(pkh, args.clone().password) .map_err(|e| error::Error::Other(e.to_string()))?; let public_key = key.ref_to(); - if tx::is_reveal_pk_needed::(client, address, args.force).await? { - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args, - None, - None, - ) - .await?; - - let (mut tx, _epoch) = tx::build_reveal_pk::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, + if tx::is_reveal_pk_needed(context.client, address, args.force).await? { + println!( + "Submitting a tx to reveal the public key for address {address}..." 
+ ); + let (mut tx, signing_data, _epoch) = tx::build_reveal_pk( + context, &args, - address, &public_key, - &signing_data.fee_payer, ) .await?; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx, - ) - .await?; + signing::generate_test_vector(context, &tx).await?; - signing::sign_tx(&mut ctx.wallet, &args, &mut tx, signing_data)?; + context.sign(&mut tx, &args, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args, tx) - .await?; + context.submit(tx, &args).await?; } } @@ -146,25 +120,19 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &args.owner).await?; - - let (mut tx, signing_data, _epoch) = tx::build_custom::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&mut namada, args.tx.clone(), &args.owner).await?; + + let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -179,23 +147,17 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let (mut tx, signing_data, _epoch) = tx::build_update_account::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; - signing::generate_test_vector::<_, _, 
IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -210,23 +172,18 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let (mut tx, signing_data, _epoch) = tx::build_init_account::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _epoch) = tx::build_init_account(&mut namada, &args) .await?; + signing::generate_test_vector(&mut namada, &tx).await?; + if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -438,19 +395,13 @@ where tx.add_code_from_hash(tx_code_hash).add_data(data); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_args, - None, - None, - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let signing_data = + aux_signing_data(&mut namada, &tx_args, None, None).await?; - tx::prepare_tx::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, + tx::prepare_tx( + &mut namada, &tx_args, &mut tx, signing_data.fee_payer.clone(), @@ -458,18 +409,14 @@ where ) .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(&mut namada, 
&tx).await?; if tx_args.dump_tx { tx::dump_tx::(&tx_args, tx); } else { - signing::sign_tx(&mut ctx.wallet, &tx_args, &mut tx, signing_data)?; + namada.sign(&mut tx, &tx_args, signing_data)?; - let result = - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &tx_args, tx) - .await? - .initialized_accounts(); + let result = namada.submit(tx, &tx_args).await?.initialized_accounts(); if !tx_args.dry_run { let (validator_address_alias, validator_address) = match &result[..] @@ -681,36 +628,25 @@ pub async fn submit_transfer< args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { - submit_reveal_aux::<_, IO>( - client, - &mut ctx, + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + + submit_reveal_aux( + &mut namada, args.tx.clone(), &args.source.effective_address(), ) .await?; - let (mut tx, signing_data, tx_epoch) = tx::build_transfer::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, signing_data, tx_epoch) = args.clone().build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); break; } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - let result = tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx, - ) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + let result = namada.submit(tx, &args.tx).await?; let submission_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; @@ -750,24 +686,17 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - submit_reveal_aux::<_, IO>(client, &mut ctx, args.tx.clone(), &args.source).await?; - - let (mut tx, signing_data, _epoch) = tx::build_ibc_transfer::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - 
signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&mut namada, args.tx.clone(), &args.source).await?; + let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -784,7 +713,8 @@ where { let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; let governance_parameters = rpc::query_governance_parameters(client).await; - + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args .is_offline { @@ -798,9 +728,8 @@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, + let signing_data = aux_signing_data( + &mut namada, &args.tx, Some(proposal.author.clone()), default_signer, @@ -832,19 +761,16 @@ where .validate(&governance_parameters, current_epoch, args.tx.force) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux::<_, IO>( - client, - &mut ctx, + submit_reveal_aux( + &mut namada, args.tx.clone(), &proposal.proposal.author, ) .await?; - tx::build_pgf_funding_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), + tx::build_pgf_funding_proposal( + &mut namada, + &args, proposal, ) .await? 
@@ -870,19 +796,16 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux::<_, IO>( - client, - &mut ctx, + submit_reveal_aux( + &mut namada, args.tx.clone(), &proposal.proposal.author, ) .await?; - tx::build_pgf_stewards_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), + tx::build_pgf_stewards_proposal( + &mut namada, + &args, proposal, ) .await? @@ -906,46 +829,27 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux::<_, IO>( - client, - &mut ctx, + submit_reveal_aux( + &mut namada, args.tx.clone(), &proposal.proposal.author, ) .await?; - tx::build_default_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), + tx::build_default_proposal( + &mut namada, + &args, proposal, ) .await? }; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_builder, - ) - .await?; + signing::generate_test_vector(&mut namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx_builder); } else { - signing::sign_tx( - &mut ctx.wallet, - &args.tx, - &mut tx_builder, - signing_data, - )?; - tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx_builder, - ) - .await?; + namada.sign(&mut tx_builder, &args.tx, signing_data)?; + namada.submit(tx_builder, &args.tx).await?; } Ok(()) @@ -960,11 +864,12 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let default_signer = Some(args.voter.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, + let signing_data = aux_signing_data( + &mut namada, &args.tx, Some(args.voter.clone()), default_signer.clone(), @@ -1012,39 +917,15 @@ where display_line!(IO, "Proposal vote serialized to: {}", output_file_path); 
return Ok(()); } else { - let current_epoch = rpc::query_and_print_epoch::(client).await; - tx::build_vote_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - current_epoch, - ) - .await? + args.build(&mut namada).await? }; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_builder, - ) - .await?; + signing::generate_test_vector(&mut namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx_builder); } else { - signing::sign_tx( - &mut ctx.wallet, - &args.tx, - &mut tx_builder, - signing_data, - )?; - tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx_builder, - ) - .await?; + namada.sign(&mut tx_builder, &args.tx, signing_data)?; + namada.submit(tx_builder, &args.tx).await?; } Ok(()) @@ -1069,11 +950,11 @@ where edisplay_line!(IO, "Couldn't decode the transaction."); safe_exit(1) }; - + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_signer = Some(owner.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, + let signing_data = aux_signing_data( + &mut namada, &tx_args, Some(owner.clone()), default_signer, @@ -1147,13 +1028,9 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - submit_reveal_aux::<_, IO>( - client, - ctx, - args.tx, - &(&args.public_key).into(), - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&mut namada, args.tx, &(&args.public_key).into()).await?; Ok(()) } @@ -1167,25 +1044,20 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_address = args.source.clone().unwrap_or(args.validator.clone()); - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &default_address).await?; + submit_reveal_aux(&mut namada, args.tx.clone(), 
&default_address).await?; - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_bond::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, signing_data, _fee_unshield_epoch) = args.build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -1200,24 +1072,18 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = - tx::build_unbond::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + args.build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; tx::query_unbonds::<_, IO>(client, args.clone(), latest_withdrawal_pre) .await?; @@ -1235,23 +1101,18 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_withdraw::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - 
.await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -1265,22 +1126,18 @@ pub async fn submit_validator_commission_change( where C: namada::ledger::queries::Client + Sync, { - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_validator_commission_change::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx).await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(&mut namada).await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -1297,22 +1154,18 @@ pub async fn submit_unjail_validator< where C::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_unjail_validator::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx).await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(&mut namada).await?; + 
signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -1330,23 +1183,18 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_update_steward_commission::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(&mut namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) @@ -1361,23 +1209,17 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = tx::build_resign_steward::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - ) - .await?; + let mut namada = + NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, 
IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data)?; + namada.submit(tx, &args.tx).await?; } Ok(()) diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..b420d24a43 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -80,7 +80,7 @@ use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; use namada::types::chain::ChainId; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use namada::types::masp::{ ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; @@ -104,6 +104,7 @@ use namada_test_utils::tx_data::TxWriteData; use rand_core::OsRng; use sha2::{Digest, Sha256}; use tempfile::TempDir; +use namada::ledger::NamadaImpl; pub const WASM_DIR: &str = "../wasm"; pub const TX_BOND_WASM: &str = "tx_bond.wasm"; @@ -682,7 +683,7 @@ impl Default for BenchShieldedCtx { let mut shell = BenchShell::default(); let mut ctx = - Context::new::(namada_apps::cli::args::Global { + Context::new::(namada_apps::cli::args::Global { chain_id: None, base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), wasm_dir: Some(WASM_DIR.into()), @@ -803,11 +804,13 @@ impl BenchShieldedCtx { &[], )) .unwrap(); + let mut namada = NamadaImpl::new( + &self.shell, + &mut self.wallet, + &mut self.shielded, + ); let shielded = async_runtime - .block_on( - self.shielded - .gen_shielded_transfer::<_, DefaultIo>(&self.shell, args), - ) + .block_on(ShieldedContext::::gen_shielded_transfer(&mut namada, &args)) .unwrap() .map( |ShieldedTransfer { diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index b9573cab97..38cfcd51cb 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -9,7 +9,6 @@ use borsh::BorshSerialize; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; -use 
namada_core::types::key::common; use namada_core::types::storage::Epoch; use owo_colors::OwoColorize; use serde::{Deserialize, Serialize}; @@ -23,10 +22,8 @@ use crate::ledger::queries::{ use crate::proto::Tx; use crate::sdk::args; use crate::sdk::error::Error; -use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; use crate::sdk::tx::prepare_tx; -use crate::sdk::wallet::{Wallet, WalletUtils}; use crate::types::address::Address; use crate::types::control_flow::time::{Duration, Instant}; use crate::types::control_flow::{ @@ -41,17 +38,14 @@ use crate::types::keccak::KeccakHash; use crate::types::token::{Amount, DenominatedAmount}; use crate::types::voting_power::FractionalVotingPower; use crate::{display, display_line}; +use crate::ledger::Namada; +use crate::sdk::signing::aux_signing_data; +use crate::sdk::signing::SigningTxData; + /// Craft a transaction that adds a transfer to the Ethereum bridge pool. -pub async fn build_bridge_pool_tx< - C: crate::ledger::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_bridge_pool_tx<'a>( + context: &mut impl Namada<'a>, args::EthereumBridgePool { tx: tx_args, nut, @@ -64,11 +58,18 @@ pub async fn build_bridge_pool_tx< fee_token, code_path, }: args::EthereumBridgePool, - wrapper_fee_payer: common::PublicKey, -) -> Result<(Tx, Option), Error> { +) -> Result<(Tx, SigningTxData, Option), Error> { + let default_signer = Some(sender.clone()); + let signing_data = aux_signing_data( + context, + &tx_args, + Some(sender.clone()), + default_signer, + ) + .await?; let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); - let DenominatedAmount { amount, .. } = validate_amount::<_, IO>( - client, + let DenominatedAmount { amount, .. 
} = validate_amount( + context.client, amount, &wrapped_erc20s::token(&asset), tx_args.force, @@ -77,7 +78,7 @@ pub async fn build_bridge_pool_tx< .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; let DenominatedAmount { amount: fee_amount, .. - } = validate_amount::<_, IO>(client, fee_amount, &fee_token, tx_args.force) + } = validate_amount(context.client, fee_amount, &fee_token, tx_args.force) .await .map_err(|e| { Error::Other(format!( @@ -105,7 +106,7 @@ pub async fn build_bridge_pool_tx< }; let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, code_path.to_str().unwrap()) + query_wasm_code_hash(context.client, code_path.to_str().unwrap()) .await .unwrap(); @@ -115,18 +116,16 @@ pub async fn build_bridge_pool_tx< // TODO(namada#1800): validate the tx on the client side - let epoch = prepare_tx::( - client, - wallet, - shielded, + let epoch = prepare_tx( + context, &tx_args, &mut tx, - wrapper_fee_payer, + signing_data.fee_payer.clone(), None, ) .await?; - Ok((tx, epoch)) + Ok((tx, signing_data, epoch)) } /// A json serializable representation of the Ethereum @@ -913,7 +912,7 @@ mod recommendations { use super::*; use crate::types::control_flow::ProceedOrElse; - use crate::types::io::DefaultIo; + use crate::types::io::StdIo; /// An established user address for testing & development pub fn bertha_address() -> Address { @@ -1019,7 +1018,7 @@ mod recommendations { signed_pool: &mut signed_pool, expected_eligible: &mut expected, }); - let eligible = generate_eligible::( + let eligible = generate_eligible::( &table, &in_progress, signed_pool, @@ -1114,7 +1113,7 @@ mod recommendations { let profitable = vec![transfer(100_000); 17]; let hash = profitable[0].keccak256().to_string(); let expected = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(profitable), &Default::default(), Uint::from_u64(800_000), @@ -1133,7 +1132,7 @@ mod recommendations { let hash 
= transfers[0].keccak256().to_string(); transfers.push(transfer(0)); let expected: Vec<_> = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(800_000), @@ -1151,7 +1150,7 @@ mod recommendations { let transfers = vec![transfer(75_000); 4]; let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 2]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(50_000), @@ -1173,7 +1172,7 @@ mod recommendations { .map(|t| t.keccak256().to_string()) .take(5) .collect(); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1192,7 +1191,7 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 4]; transfers.extend([transfer(17_500), transfer(17_500)]); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1208,7 +1207,7 @@ mod recommendations { #[test] fn test_wholly_infeasible() { let transfers = vec![transfer(75_000); 4]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(300_000), @@ -1289,7 +1288,7 @@ mod recommendations { const VALIDATOR_GAS_FEE: Uint = Uint::from_u64(100_000); - let recommended_batch = generate_recommendations::( + let recommended_batch = generate_recommendations::( eligible, &conversion_table, // gas spent by validator signature checks diff --git a/shared/src/ledger/eth_bridge/validator_set.rs b/shared/src/ledger/eth_bridge/validator_set.rs index 4ae08dd598..be99e130f8 100644 --- 
a/shared/src/ledger/eth_bridge/validator_set.rs +++ b/shared/src/ledger/eth_bridge/validator_set.rs @@ -26,7 +26,7 @@ use crate::types::control_flow::{ self, install_shutdown_signal, Halt, TryHalt, }; use crate::types::ethereum_events::EthAddress; -use crate::types::io::{DefaultIo, Io}; +use crate::types::io::{Io, StdIo}; use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs; use crate::{display_line, edisplay_line}; @@ -513,7 +513,7 @@ where time::sleep(sleep_for).await; let is_synchronizing = - eth_sync_or::<_, _, _, DefaultIo>(&*eth_client, || ()) + eth_sync_or::<_, _, _, StdIo>(&*eth_client, || ()) .await .is_break(); if is_synchronizing { diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 04b5809bc2..505699d707 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -17,3 +17,413 @@ pub mod vp_host_fns; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; + +use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; +use crate::types::masp::{TransferSource, TransferTarget}; +use crate::types::address::Address; +use crate::sdk::args::{self, InputAmount}; +use crate::sdk::tx::{ + TX_TRANSFER_WASM, TX_REVEAL_PK, TX_BOND_WASM, TX_UNBOND_WASM, TX_IBC_WASM, + TX_INIT_PROPOSAL, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, VP_USER_WASM, + TX_CHANGE_COMMISSION_WASM, TX_INIT_VALIDATOR_WASM, TX_UNJAIL_VALIDATOR_WASM, + TX_WITHDRAW_WASM, TX_BRIDGE_POOL_WASM, TX_RESIGN_STEWARD, + TX_UPDATE_STEWARD_COMMISSION, self, +}; +use std::path::PathBuf; +use crate::types::transaction::GasLimit; +use crate::sdk::signing::{SigningTxData, self}; +use crate::proto::Tx; +use crate::types::key::*; +use crate::types::token; +use crate::sdk::tx::ProcessTxResponse; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; +use std::str::FromStr; +use std::ops::{Deref, DerefMut}; +use 
namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; + +/// Encapsulates a Namada session to enable splitting borrows of its parts +pub struct NamadaStruct<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + /// Used to send and receive messages from the ledger + pub client: &'a C, + /// Stores the addresses and keys required for ledger interactions + pub wallet: &'a mut Wallet, + /// Stores the current state of the shielded pool + pub shielded: &'a mut ShieldedContext, +} + +#[async_trait::async_trait(?Send)] +/// An interface for high-level interaction with the Namada SDK +pub trait Namada<'a> : DerefMut> { + /// A client with async request dispatcher method + type Client: 'a + crate::ledger::queries::Client + Sync; + /// Captures the interactive parts of the wallet's functioning + type WalletUtils: 'a + WalletUtils; + /// Abstracts platform specific details away from the logic of shielded pool + /// operations. 
+ type ShieldedUtils: 'a + ShieldedUtils; + + /// Make a tx builder using no arguments + fn tx_builder(&mut self) -> args::Tx { + args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + fee_amount: None, + wrapper_fee_payer: None, + fee_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + } + } + + /// Make a TxTransfer builder from the given minimum set of arguments + fn new_transfer( + &mut self, + source: TransferSource, + target: TransferTarget, + token: Address, + amount: InputAmount, + ) -> args::TxTransfer { + args::TxTransfer { + source, + target, + token, + amount, + tx_code_path: PathBuf::from(TX_TRANSFER_WASM), + tx: self.tx_builder(), + native_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + } + } + + /// Make a RevealPK builder from the given minimum set of arguments + fn new_reveal_pk( + &mut self, + public_key: common::PublicKey, + ) -> args::RevealPk { + args::RevealPk { + public_key, + tx: self.tx_builder(), + } + } + + /// Make a Bond builder from the given minimum set of arguments + fn new_bond( + &mut self, + validator: Address, + amount: token::Amount, + ) -> args::Bond { + args::Bond { + validator, + amount, + source: None, + tx: self.tx_builder(), + native_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + tx_code_path: PathBuf::from(TX_BOND_WASM), + } + } + + /// Make a Unbond builder from the given minimum set of arguments + fn new_unbond( + &mut self, + validator: Address, + amount: token::Amount, + ) -> 
args::Unbond { + args::Unbond { + validator, + amount, + source: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UNBOND_WASM), + } + } + + /// Make a TxIbcTransfer builder from the given minimum set of arguments + fn new_ibc_transfer( + &mut self, + source: Address, + receiver: String, + token: Address, + amount: InputAmount, + channel_id: ChannelId, + ) -> args::TxIbcTransfer { + args::TxIbcTransfer { + source, + receiver, + token, + amount, + channel_id, + port_id: PortId::from_str("transfer").unwrap(), + timeout_height: None, + timeout_sec_offset: None, + memo: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_IBC_WASM), + } + } + + /// Make a InitProposal builder from the given minimum set of arguments + fn new_init_proposal( + &mut self, + proposal_data: Vec, + ) -> args::InitProposal { + args::InitProposal { + proposal_data, + native_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + is_offline: false, + is_pgf_stewards: false, + is_pgf_funding: false, + tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a TxUpdateAccount builder from the given minimum set of arguments + fn new_update_account( + &mut self, + addr: Address, + ) -> args::TxUpdateAccount { + args::TxUpdateAccount { + addr, + vp_code_path: None, + public_keys: vec![], + threshold: None, + tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), + tx: self.tx_builder(), + } + } + + /// Make a VoteProposal builder from the given minimum set of arguments + fn new_vote_prposal( + &mut self, + vote: String, + voter: Address, + ) -> args::VoteProposal { + args::VoteProposal { + vote, + voter, + proposal_id: None, + is_offline: false, + proposal_data: None, + tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a CommissionRateChange builder from the given minimum set of arguments + fn new_change_commission_rate( + &mut self, + rate: Dec, + validator: Address, + ) -> 
args::CommissionRateChange { + args::CommissionRateChange { + rate, + validator, + tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxInitValidator builder from the given minimum set of arguments + fn new_init_validator( + &mut self, + commission_rate: Dec, + max_commission_rate_change: Dec, + ) -> args::TxInitValidator { + args::TxInitValidator { + commission_rate, + max_commission_rate_change, + scheme: SchemeType::Ed25519, + account_keys: vec![], + threshold: None, + consensus_key: None, + eth_cold_key: None, + eth_hot_key: None, + protocol_key: None, + validator_vp_code_path: PathBuf::from(VP_USER_WASM), + unsafe_dont_encrypt: false, + tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxUnjailValidator builder from the given minimum set of arguments + fn new_unjail_validator( + &mut self, + validator: Address, + ) -> args::TxUnjailValidator { + args::TxUnjailValidator { + validator, + tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a Withdraw builder from the given minimum set of arguments + fn new_withdraw( + &mut self, + validator: Address, + ) -> args::Withdraw { + args::Withdraw { + validator, + source: None, + tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), + tx: self.tx_builder(), + } + } + + /// Make an EthereumBridgePool builder from the given minimum set of arguments + fn new_add_erc20_transfer( + &mut self, + sender: Address, + recipient: EthAddress, + asset: EthAddress, + amount: InputAmount, + ) -> args::EthereumBridgePool { + args::EthereumBridgePool { + sender, + recipient, + asset, + amount, + fee_amount: InputAmount::Unvalidated(token::DenominatedAmount { + amount: token::Amount::default(), + denom: NATIVE_MAX_DECIMAL_PLACES.into(), + }), + fee_payer: None, + fee_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + nut: false, + code_path: 
PathBuf::from(TX_BRIDGE_POOL_WASM), + tx: self.tx_builder(), + } + } + + /// Make a ResignSteward builder from the given minimum set of arguments + fn new_resign_steward(&mut self, steward: Address) -> args::ResignSteward { + args::ResignSteward { + steward, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), + } + } + + /// Make a UpdateStewardCommission builder from the given minimum set of + /// arguments + fn new_update_steward_rewards( + &mut self, + steward: Address, + commission: Vec, + ) -> args::UpdateStewardCommission { + args::UpdateStewardCommission { + steward, + commission, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), + } + } + + /// Make a TxCustom builder from the given minimum set of arguments + fn new_custom( + &mut self, + owner: Address, + ) -> args::TxCustom { + args::TxCustom { + owner, + tx: self.tx_builder(), + code_path: None, + data_path: None, + serialized_tx: None, + } + } + + /// Sign the given transaction using the given signing data + fn sign( + &mut self, + tx: &mut Tx, + args: &args::Tx, + signing_data: SigningTxData, + ) -> crate::sdk::error::Result<()> { + signing::sign_tx(self.wallet, args, tx, signing_data) + } + + /// Process the given transaction using the given flags + async fn submit( + &mut self, + tx: Tx, + args: &args::Tx, + ) -> crate::sdk::error::Result { + tx::process_tx(self.client, self.wallet, args, tx).await + } +} + +/// Provides convenience methods for common Namada interactions +pub struct NamadaImpl<'a, C, U, V>(NamadaStruct<'a, C, U, V>) where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils; + +impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + /// Construct a new Namada context + pub fn new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + ) -> Self { + Self(NamadaStruct { client, wallet, shielded 
}) + } +} + +impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + type Target = NamadaStruct<'a, C, U, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + type Client = C; + type WalletUtils = U; + type ShieldedUtils = V; +} diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index b765dece5a..6e1381b18f 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -19,6 +19,16 @@ use crate::types::masp::MaspValue; use crate::types::storage::Epoch; use crate::types::transaction::GasLimit; use crate::types::{storage, token}; +use crate::ledger::Namada; +use crate::sdk::signing::SigningTxData; +use crate::sdk::{tx, rpc}; +use crate::ledger::eth_bridge::bridge_pool; +use namada_core::ledger::governance::cli::onchain::{ + DefaultProposal, PgfFundingProposal, PgfStewardProposal, +}; + +/// The Namada token +pub const NAM: &str = "NAM"; /// [`Duration`](StdDuration) wrapper that provides a /// method to parse a value from a string. 
@@ -124,6 +134,52 @@ pub struct TxCustom { pub owner: C::Address, } +impl TxBuilder for TxCustom { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + TxCustom { tx: func(self.tx), ..self } + } +} + +impl TxCustom { + /// Path to the tx WASM code file + pub fn code_path(self, code_path: PathBuf) -> Self { + Self { code_path: Some(code_path), ..self } + } + /// Path to the data file + pub fn data_path(self, data_path: C::Data) -> Self { + Self { data_path: Some(data_path), ..self } + } + /// Path to the serialized transaction + pub fn serialized_tx(self, serialized_tx: C::Data) -> Self { + Self { serialized_tx: Some(serialized_tx), ..self } + } + /// The address that correspond to the signatures/signing-keys + pub fn owner(self, owner: C::Address) -> Self { + Self { owner, ..self } + } +} + +impl TxCustom { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_custom(context, self).await + } +} + +/// An amount read in by the cli +#[derive(Copy, Clone, Debug)] +pub enum InputAmount { + /// An amount whose representation has been validated + /// against the allowed representation in storage + Validated(token::DenominatedAmount), + /// The parsed amount read in from the cli. It has + /// not yet been validated against the allowed + /// representation in storage. + Unvalidated(token::DenominatedAmount), +} + /// Transfer transaction arguments #[derive(Clone, Debug)] pub struct TxTransfer { @@ -142,16 +198,47 @@ pub struct TxTransfer { /// Path to the TX WASM code file pub tx_code_path: PathBuf, } -/// An amount read in by the cli -#[derive(Copy, Clone, Debug)] -pub enum InputAmount { - /// An amount whose representation has been validated - /// against the allowed representation in storage - Validated(token::DenominatedAmount), - /// The parsed amount read in from the cli. 
It has - /// not yet been validated against the allowed - /// representation in storage. - Unvalidated(token::DenominatedAmount), + +impl TxBuilder for TxTransfer { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + TxTransfer { tx: func(self.tx), ..self } + } +} + +impl TxTransfer { + /// Transfer source address + pub fn source(self, source: C::TransferSource) -> Self { + Self { source, ..self } + } + /// Transfer target address + pub fn receiver(self, target: C::TransferTarget) -> Self { + Self { target, ..self } + } + /// Transferred token address + pub fn token(self, token: C::Address) -> Self { + Self { token, ..self } + } + /// Transferred token amount + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { native_token, ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl TxTransfer { + /// Build a transaction from this builder + pub async fn build<'a>(self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_transfer(context, self).await + } } /// IBC transfer transaction arguments @@ -163,7 +250,7 @@ pub struct TxIbcTransfer { pub source: C::Address, /// Transfer target address pub receiver: String, - /// Transferred token addres s + /// Transferred token address pub token: C::Address, /// Transferred token amount pub amount: InputAmount, @@ -181,6 +268,65 @@ pub struct TxIbcTransfer { pub tx_code_path: PathBuf, } +impl TxBuilder for TxIbcTransfer { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + TxIbcTransfer { tx: func(self.tx), ..self } + } +} + +impl TxIbcTransfer { + /// Transfer source address + pub fn source(self, source: C::Address) -> Self { + Self { source, ..self } + } + /// Transfer target address + pub fn receiver(self, 
receiver: String) -> Self { + Self { receiver, ..self } + } + /// Transferred token address + pub fn token(self, token: C::Address) -> Self { + Self { token, ..self } + } + /// Transferred token amount + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + /// Port ID + pub fn port_id(self, port_id: PortId) -> Self { + Self { port_id, ..self } + } + /// Channel ID + pub fn channel_id(self, channel_id: ChannelId) -> Self { + Self { channel_id, ..self } + } + /// Timeout height of the destination chain + pub fn timeout_height(self, timeout_height: u64) -> Self { + Self { timeout_height: Some(timeout_height), ..self } + } + /// Timeout timestamp offset + pub fn timeout_sec_offset(self, timeout_sec_offset: u64) -> Self { + Self { timeout_sec_offset: Some(timeout_sec_offset), ..self } + } + /// Memo + pub fn memo(self, memo: String) -> Self { + Self { memo: Some(memo), ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl TxIbcTransfer { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_ibc_transfer(context, self).await + } +} + + /// Transaction to initialize create a new proposal #[derive(Clone, Debug)] pub struct InitProposal { @@ -200,6 +346,121 @@ pub struct InitProposal { pub tx_code_path: PathBuf, } +impl TxBuilder for InitProposal { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + InitProposal { tx: func(self.tx), ..self } + } +} + +impl InitProposal { + /// The proposal data + pub fn proposal_data(self, proposal_data: C::Data) -> Self { + Self { proposal_data, ..self } + } + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { native_token, ..self } + } + /// Flag if proposal should be run offline + pub fn is_offline(self, 
is_offline: bool) -> Self { + Self { is_offline, ..self } + } + /// Flag if proposal is of type Pgf stewards + pub fn is_pgf_stewards(self, is_pgf_stewards: bool) -> Self { + Self { is_pgf_stewards, ..self } + } + /// Flag if proposal is of type Pgf funding + pub fn is_pgf_funding(self, is_pgf_funding: bool) -> Self { + Self { is_pgf_funding, ..self } + } + /// Path to the tx WASM file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl InitProposal { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + let current_epoch = rpc::query_epoch(context.client).await?; + let governance_parameters = rpc::query_governance_parameters(context.client).await; + + if self.is_pgf_funding { + let proposal = + PgfFundingProposal::try_from(self.proposal_data.as_ref()) + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })? 
+ .validate(&governance_parameters, current_epoch, self.tx.force) + .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; + + tx::build_pgf_funding_proposal( + context, + self, + proposal, + ) + .await + } else if self.is_pgf_stewards { + let proposal = PgfStewardProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) + })?; + let author_balance = rpc::get_token_balance( + context.client, + context.wallet.find_address(NAM).expect("NAM not in wallet"), + &proposal.proposal.author, + ) + .await?; + let proposal = proposal + .validate( + &governance_parameters, + current_epoch, + author_balance, + self.tx.force, + ) + .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; + + tx::build_pgf_stewards_proposal( + context, + self, + proposal, + ) + .await + } else { + let proposal = DefaultProposal::try_from(self.proposal_data.as_ref()) + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) + })?; + let author_balance = rpc::get_token_balance( + context.client, + context.wallet.find_address(NAM).expect("NAM not in wallet"), + &proposal.proposal.author, + ) + .await?; + let proposal = proposal + .validate( + &governance_parameters, + current_epoch, + author_balance, + self.tx.force, + ) + .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; + tx::build_default_proposal( + context, + self, + proposal, + ) + .await + } + } +} + /// Transaction to vote on a proposal #[derive(Clone, Debug)] pub struct VoteProposal { @@ -219,6 +480,49 @@ pub struct VoteProposal { pub tx_code_path: PathBuf, } +impl TxBuilder for VoteProposal { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + VoteProposal { tx: func(self.tx), ..self } + } +} + +impl VoteProposal { + /// Proposal id + pub fn proposal_id(self, proposal_id: u64) -> Self { + Self { proposal_id: Some(proposal_id), ..self } + 
} + /// The vote + pub fn vote(self, vote: String) -> Self { + Self { vote, ..self } + } + /// The address of the voter + pub fn voter(self, voter: C::Address) -> Self { + Self { voter, ..self } + } + /// Flag if proposal vote should be run offline + pub fn is_offline(self, is_offline: bool) -> Self { + Self { is_offline, ..self } + } + /// The proposal file path + pub fn proposal_data(self, proposal_data: C::Data) -> Self { + Self { proposal_data: Some(proposal_data), ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl VoteProposal { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + let current_epoch = rpc::query_epoch(context.client).await?; + tx::build_vote_proposal(context, self, current_epoch).await + } +} + /// Transaction to initialize a new account #[derive(Clone, Debug)] pub struct TxInitAccount { @@ -282,6 +586,44 @@ pub struct TxUpdateAccount { pub threshold: Option, } +impl TxBuilder for TxUpdateAccount { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + TxUpdateAccount { tx: func(self.tx), ..self } + } +} + +impl TxUpdateAccount { + /// Path to the VP WASM code file + pub fn vp_code_path(self, vp_code_path: PathBuf) -> Self { + Self { vp_code_path: Some(vp_code_path), ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } + /// Address of the account whose VP is to be updated + pub fn addr(self, addr: C::Address) -> Self { + Self { addr, ..self } + } + /// Public keys + pub fn public_keys(self, public_keys: Vec) -> Self { + Self { public_keys, ..self } + } + /// The account threshold + pub fn threshold(self, threshold: u8) -> Self { + Self { threshold: Some(threshold), ..self } + } +} + +impl TxUpdateAccount { + 
/// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_update_account(context, self).await + } +} + /// Bond arguments #[derive(Clone, Debug)] pub struct Bond { @@ -300,6 +642,45 @@ pub struct Bond { pub tx_code_path: PathBuf, } +impl TxBuilder for Bond { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + Bond { tx: func(self.tx), ..self } + } +} + +impl Bond { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + /// Amount of tokens to stake in a bond + pub fn amount(self, amount: token::Amount) -> Self { + Self { amount, ..self } + } + /// Source address for delegations. For self-bonds, the validator is + /// also the source. + pub fn source(self, source: C::Address) -> Self { + Self { source: Some(source), ..self } + } + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { native_token, ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl Bond { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_bond(context, self).await + } +} + /// Unbond arguments #[derive(Clone, Debug)] pub struct Unbond { @@ -316,6 +697,41 @@ pub struct Unbond { pub tx_code_path: PathBuf, } +impl Unbond { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option, Option<(Epoch, token::Amount)>)> + { + tx::build_unbond(context, self).await + } +} + +impl TxBuilder for Unbond { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + Unbond { tx: func(self.tx), 
..self } + } +} + +impl Unbond { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + /// Amount of tokens to unbond from a bond + pub fn amount(self, amount: token::Amount) -> Self { + Self { amount, ..self } + } + /// Source address for unbonding from delegations. For unbonding from + /// self-bonds, the validator is also the source + pub fn source(self, source: C::Address) -> Self { + Self { source: Some(source), ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + /// Reveal public key #[derive(Clone, Debug)] pub struct RevealPk { @@ -325,6 +741,32 @@ pub struct RevealPk { pub public_key: C::PublicKey, } +impl TxBuilder for RevealPk { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + RevealPk { tx: func(self.tx), ..self } + } +} + +impl RevealPk { + /// A public key to be revealed on-chain + pub fn public_key(self, public_key: C::PublicKey) -> Self { + Self { public_key, ..self } + } +} + +impl RevealPk { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_reveal_pk( + context, + &self.tx, + &self.public_key, + ).await + } +} + /// Query proposal #[derive(Clone, Debug)] pub struct QueryProposal { @@ -362,6 +804,37 @@ pub struct Withdraw { pub tx_code_path: PathBuf, } +impl TxBuilder for Withdraw { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + Withdraw { tx: func(self.tx), ..self } + } +} + +impl Withdraw { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + /// Source address for withdrawing from delegations. 
For withdrawing + /// from self-bonds, the validator is also the source + pub fn source(self, source: C::Address) -> Self { + Self { source: Some(source), ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl Withdraw { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_withdraw(context, self).await + } +} + /// Query asset conversions #[derive(Clone, Debug)] pub struct QueryConversions { @@ -452,6 +925,37 @@ pub struct CommissionRateChange { pub tx_code_path: PathBuf, } +impl TxBuilder for CommissionRateChange { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + CommissionRateChange { tx: func(self.tx), ..self } + } +} + + +impl CommissionRateChange { + /// Validator address (should be self) + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + /// Value to which the tx changes the commission rate + pub fn rate(self, rate: Dec) -> Self { + Self { rate, ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl CommissionRateChange { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_validator_commission_change(context, self).await + } +} + #[derive(Clone, Debug)] /// Commission rate change args pub struct UpdateStewardCommission { @@ -465,6 +969,36 @@ pub struct UpdateStewardCommission { pub tx_code_path: PathBuf, } +impl TxBuilder for UpdateStewardCommission { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + UpdateStewardCommission { tx: func(self.tx), ..self } + } +} + +impl UpdateStewardCommission { + /// Steward 
address + pub fn steward(self, steward: C::Address) -> Self { + Self { steward, ..self } + } + /// Value to which the tx changes the commission rate + pub fn commission(self, commission: C::Data) -> Self { + Self { commission, ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl UpdateStewardCommission { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_update_steward_commission(context, self).await + } +} + #[derive(Clone, Debug)] /// Commission rate change args pub struct ResignSteward { @@ -476,6 +1010,32 @@ pub struct ResignSteward { pub tx_code_path: PathBuf, } +impl TxBuilder for ResignSteward { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + ResignSteward { tx: func(self.tx), ..self } + } +} + +impl ResignSteward { + /// Validator address + pub fn steward(self, steward: C::Address) -> Self { + Self { steward, ..self } + } + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl ResignSteward { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_resign_steward(context, self).await + } +} + #[derive(Clone, Debug)] /// Re-activate a jailed validator args pub struct TxUnjailValidator { @@ -487,6 +1047,32 @@ pub struct TxUnjailValidator { pub tx_code_path: PathBuf, } +impl TxBuilder for TxUnjailValidator { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + TxUnjailValidator { tx: func(self.tx), ..self } + } +} + +impl TxUnjailValidator { + /// Validator address (should be self) + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } 
+ /// Path to the TX WASM code file + pub fn tc_code_path(self, tx_code_path: PathBuf) -> Self { + Self { tx_code_path, ..self } + } +} + +impl TxUnjailValidator { + /// Build a transaction from this builder + pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_unjail_validator(context, self).await + } +} + #[derive(Clone, Debug)] /// Sign a transaction offline pub struct SignTx { @@ -597,6 +1183,109 @@ pub struct Tx { pub password: Option>, } +/// Builder functions for Tx +pub trait TxBuilder : Sized { + /// Apply the given function to the Tx inside self + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx; + /// Simulate applying the transaction + fn dry_run(self, dry_run: bool) -> Self { + self.tx(|x| Tx { dry_run, ..x }) + } + /// Simulate applying both the wrapper and inner transactions + fn dry_run_wrapper(self, dry_run_wrapper: bool) -> Self { + self.tx(|x| Tx { dry_run_wrapper, ..x }) + } + /// Dump the transaction bytes to file + fn dump_tx(self, dump_tx: bool) -> Self { + self.tx(|x| Tx { dump_tx, ..x }) + } + /// The output directory path to where serialize the data + fn output_folder(self, output_folder: PathBuf) -> Self { + self.tx(|x| Tx { output_folder: Some(output_folder), ..x }) + } + /// Submit the transaction even if it doesn't pass client checks + fn force(self, force: bool) -> Self { + self.tx(|x| Tx { force, ..x }) + } + /// Do not wait for the transaction to be added to the blockchain + fn broadcast_only(self, broadcast_only: bool) -> Self { + self.tx(|x| Tx { broadcast_only, ..x }) + } + /// The address of the ledger node as host:port + fn ledger_address(self, ledger_address: C::TendermintAddress) -> Self { + self.tx(|x| Tx { ledger_address, ..x }) + } + /// If any new account is initialized by the tx, use the given alias to + /// save it in the wallet. 
+ fn initialized_account_alias(self, initialized_account_alias: String) -> Self { + self.tx(|x| Tx { initialized_account_alias: Some(initialized_account_alias), ..x }) + } + /// Whether to force overwrite the above alias, if it is provided, in the + /// wallet. + fn wallet_alias_force(self, wallet_alias_force: bool) -> Self { + self.tx(|x| Tx { wallet_alias_force, ..x }) + } + /// The amount being payed (for gas unit) to include the transaction + fn fee_amount(self, fee_amount: InputAmount) -> Self { + self.tx(|x| Tx { fee_amount: Some(fee_amount), ..x }) + } + /// The fee payer signing key + fn wrapper_fee_payer(self, wrapper_fee_payer: C::Keypair) -> Self { + self.tx(|x| Tx { wrapper_fee_payer: Some(wrapper_fee_payer), ..x }) + } + /// The token in which the fee is being paid + fn fee_token(self, fee_token: C::Address) -> Self { + self.tx(|x| Tx { fee_token, ..x }) + } + /// The optional spending key for fee unshielding + fn fee_unshield(self, fee_unshield: C::TransferSource) -> Self { + self.tx(|x| Tx { fee_unshield: Some(fee_unshield), ..x }) + } + /// The max amount of gas used to process tx + fn gas_limit(self, gas_limit: GasLimit) -> Self { + self.tx(|x| Tx { gas_limit, ..x }) + } + /// The optional expiration of the transaction + fn expiration(self, expiration: DateTimeUtc) -> Self { + self.tx(|x| Tx { expiration: Some(expiration), ..x }) + } + /// Generate an ephimeral signing key to be used only once to sign a + /// wrapper tx + fn disposable_signing_key(self, disposable_signing_key: bool) -> Self { + self.tx(|x| Tx { disposable_signing_key, ..x }) + } + /// The chain id for which the transaction is intended + fn chain_id(self, chain_id: ChainId) -> Self { + self.tx(|x| Tx { chain_id: Some(chain_id), ..x }) + } + /// Sign the tx with the key for the given alias from your wallet + fn signing_keys(self, signing_keys: Vec) -> Self { + self.tx(|x| Tx { signing_keys, ..x }) + } + /// List of signatures to attach to the transaction + fn signatures(self, 
signatures: Vec) -> Self { + self.tx(|x| Tx { signatures, ..x }) + } + /// Path to the TX WASM code file to reveal PK + fn tx_reveal_code_path(self, tx_reveal_code_path: PathBuf) -> Self { + self.tx(|x| Tx { tx_reveal_code_path, ..x }) + } + /// Sign the tx with the public key for the given alias from your wallet + fn verification_key(self, verification_key: C::PublicKey) -> Self { + self.tx(|x| Tx { verification_key: Some(verification_key), ..x }) + } + /// Password to decrypt key + fn password(self, password: Zeroizing) -> Self { + self.tx(|x| Tx { password: Some(password), ..x }) + } +} + +impl TxBuilder for Tx { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + func(self) + } +} + /// MASP add key or address arguments #[derive(Clone, Debug)] pub struct MaspAddrKeyAdd { @@ -775,6 +1464,65 @@ pub struct EthereumBridgePool { pub code_path: PathBuf, } +impl TxBuilder for EthereumBridgePool { + fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + EthereumBridgePool { tx: func(self.tx), ..self } + } +} + +impl EthereumBridgePool { + /// Whether the transfer is for a NUT. + /// + /// By default, we add wrapped ERC20s onto the + /// Bridge pool. + pub fn nut(self, nut: bool) -> Self { + Self { nut, ..self } + } + /// The type of token + pub fn asset(self, asset: EthAddress) -> Self { + Self { asset, ..self } + } + /// The recipient address + pub fn recipient(self, recipient: EthAddress) -> Self { + Self { recipient, ..self } + } + /// The sender of the transfer + pub fn sender(self, sender: C::Address) -> Self { + Self { sender, ..self } + } + /// The amount to be transferred + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + /// The amount of gas fees + pub fn fee_amount(self, fee_amount: InputAmount) -> Self { + Self { fee_amount, ..self } + } + /// The account of fee payer. + /// + /// If unset, it is the same as the sender. 
+ pub fn fee_payer(self, fee_payer: C::Address) -> Self { + Self { fee_payer: Some(fee_payer), ..self } + } + /// The token in which the gas is being paid + pub fn fee_token(self, fee_token: C::Address) -> Self { + Self { fee_token, ..self } + } + /// Path to the tx WASM code file + pub fn code_path(self, code_path: PathBuf) -> Self { + Self { code_path, ..self } + } +} + +impl EthereumBridgePool { + /// Build a transaction from this builder + pub async fn build<'a>(self, context: &mut impl Namada<'a>) -> + crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + bridge_pool::build_bridge_pool_tx(context, self).await + } +} + /// Bridge pool proof arguments. #[derive(Debug, Clone)] pub struct BridgePoolProof { diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 739f941b9a..6b44dd018d 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -68,7 +68,7 @@ use crate::sdk::{args, rpc}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::address::{masp, Address}; -use crate::types::io::Io; +use crate::types::io::{Io, StdIo}; use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token; @@ -77,6 +77,7 @@ use crate::types::token::{ }; use crate::types::transaction::{EllipticCurve, PairingEngine, WrapperTx}; use crate::{display_line, edisplay_line}; +use crate::ledger::Namada; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. @@ -1035,7 +1036,7 @@ impl ShieldedContext { if let Some(balance) = self.compute_shielded_balance(client, vk).await? { let exchanged_amount = self - .compute_exchanged_amount::<_, IO>( + .compute_exchanged_amount( client, balance, target_epoch, @@ -1058,7 +1059,7 @@ impl ShieldedContext { /// the trace amount that could not be converted is moved from input to /// output. 
#[allow(clippy::too_many_arguments)] - async fn apply_conversion( + async fn apply_conversion( &mut self, client: &C, conv: AllowedConversion, @@ -1080,7 +1081,7 @@ impl ShieldedContext { let threshold = -conv[&masp_asset]; if threshold == 0 { edisplay_line!( - IO, + StdIo, "Asset threshold of selected conversion for asset type {} is \ 0, this is a bug, please report it.", masp_asset @@ -1110,7 +1111,7 @@ impl ShieldedContext { /// note of the conversions that were used. Note that this function does /// not assume that allowed conversions from the ledger are expressed in /// terms of the latest asset types. - pub async fn compute_exchanged_amount( + pub async fn compute_exchanged_amount( &mut self, client: &C, mut input: MaspAmount, @@ -1149,14 +1150,14 @@ impl ShieldedContext { (conversions.get_mut(&asset_type), at_target_asset_type) { display_line!( - IO, + StdIo, "converting current asset type to latest asset type..." ); // Not at the target asset type, not at the latest asset // type. Apply conversion to get from // current asset type to the latest // asset type. - self.apply_conversion::<_, IO>( + self.apply_conversion( client, conv.clone(), (asset_epoch, token_addr.clone(), denom), @@ -1171,14 +1172,14 @@ impl ShieldedContext { at_target_asset_type, ) { display_line!( - IO, + StdIo, "converting latest asset type to target asset type..." ); // Not at the target asset type, yet at the latest asset // type. Apply inverse conversion to get // from latest asset type to the target // asset type. - self.apply_conversion::<_, IO>( + self.apply_conversion( client, conv.clone(), (asset_epoch, token_addr.clone(), denom), @@ -1213,7 +1214,7 @@ impl ShieldedContext { /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to /// achieve the total value. 
- pub async fn collect_unspent_notes( + pub async fn collect_unspent_notes( &mut self, client: &C, vk: &ViewingKey, @@ -1259,7 +1260,7 @@ impl ShieldedContext { })?; let input = self.decode_all_amounts(client, pre_contr).await; let (contr, proposed_convs) = self - .compute_exchanged_amount::<_, IO>( + .compute_exchanged_amount( client, input, target_epoch, @@ -1413,7 +1414,7 @@ impl ShieldedContext { display_line!(IO, "Decoded pinned balance: {:?}", amount); // Finally, exchange the balance to the transaction's epoch let computed_amount = self - .compute_exchanged_amount::<_, IO>( + .compute_exchanged_amount( client, amount, ep, @@ -1483,10 +1484,9 @@ impl ShieldedContext { /// understood that transparent account changes are effected only by the /// amounts and signatures specified by the containing Transfer object. #[cfg(feature = "masp-tx-gen")] - pub async fn gen_shielded_transfer( - &mut self, - client: &C, - args: args::TxTransfer, + pub async fn gen_shielded_transfer<'a>( + context: &mut impl Namada<'a>, + args: &args::TxTransfer, ) -> Result, TransferErr> { // No shielded components are needed when neither source nor destination // are shielded @@ -1507,12 +1507,13 @@ impl ShieldedContext { let spending_key = spending_key.map(|x| x.into()); let spending_keys: Vec<_> = spending_key.into_iter().collect(); // Load the current shielded context given the spending key we possess - let _ = self.load().await; - self.fetch(client, &spending_keys, &[]).await?; + let _ = context.shielded.load().await; + let context = &mut **context; + context.shielded.fetch(context.client, &spending_keys, &[]).await?; // Save the update state so that future fetches can be short-circuited - let _ = self.save().await; + let _ = context.shielded.save().await; // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_epoch(client).await?; + let epoch = rpc::query_epoch(context.client).await?; // Context required for storing which notes are in the 
source's // possesion let memo = MemoBytes::empty(); @@ -1552,9 +1553,10 @@ impl ShieldedContext { // If there are shielded inputs if let Some(sk) = spending_key { // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = self - .collect_unspent_notes::<_, IO>( - client, + let (_, unspent_notes, used_convs) = context + .shielded + .collect_unspent_notes( + context.client, &to_viewing_key(&sk).vk, I128Sum::from_sum(amount), epoch, @@ -1743,7 +1745,7 @@ impl ShieldedContext { let build_transfer = || -> Result> { let (masp_tx, metadata) = builder.build( - &self.utils.local_tx_prover(), + &context.shielded.utils.local_tx_prover(), &FeeRule::non_standard(U64Sum::zero()), )?; Ok(ShieldedTransfer { diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 58609bed42..cb0e55835a 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -38,7 +38,7 @@ use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::control_flow::{time, Halt, TryHalt}; use crate::types::hash::Hash; -use crate::types::io::Io; +use crate::types::io::{Io, StdIo}; use crate::types::key::common; use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; use crate::types::{storage, token}; @@ -239,7 +239,6 @@ pub async fn query_conversion( /// Query a wasm code hash pub async fn query_wasm_code_hash< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, code_path: impl AsRef, @@ -252,7 +251,7 @@ pub async fn query_wasm_code_hash< Some(hash) => Ok(Hash::try_from(&hash[..]).expect("Invalid code hash")), None => { edisplay_line!( - IO, + StdIo, "The corresponding wasm code of the code path {} doesn't \ exist on chain.", code_path.as_ref(), @@ -789,7 +788,6 @@ pub async fn get_public_key_at( /// Query a validator's unbonds for a given epoch pub async fn query_and_print_unbonds< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, source: &Address, @@ -811,17 +809,17 
@@ pub async fn query_and_print_unbonds< } if total_withdrawable != token::Amount::default() { display_line!( - IO, + StdIo, "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - display_line!(IO, "Current epoch: {current_epoch}.") + display_line!(StdIo, "Current epoch: {current_epoch}.") } for (withdraw_epoch, amount) in not_yet_withdrawable { display_line!( - IO, + StdIo, "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native() ); @@ -939,7 +937,6 @@ pub async fn enriched_bonds_and_unbonds< /// Get the correct representation of the amount given the token type. pub async fn validate_amount< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, amount: InputAmount, @@ -958,14 +955,14 @@ pub async fn validate_amount< None => { if force { display_line!( - IO, + StdIo, "No denomination found for token: {token}, but --force \ was passed. Defaulting to the provided denomination." ); Ok(input_amount.denom) } else { display_line!( - IO, + StdIo, "No denomination found for token: {token}, the input \ arguments could not be parsed." ); @@ -977,7 +974,7 @@ pub async fn validate_amount< }?; if denom < input_amount.denom && !force { display_line!( - IO, + StdIo, "The input amount contained a higher precision than allowed by \ {token}." ); @@ -988,7 +985,7 @@ pub async fn validate_amount< } else { input_amount.increase_precision(denom).map_err(|_err| { display_line!( - IO, + StdIo, "The amount provided requires more the 256 bits to represent." ); Error::from(QueryError::General( @@ -1065,7 +1062,6 @@ where /// correctly as a string. 
pub async fn format_denominated_amount< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, token: &Address, @@ -1075,12 +1071,12 @@ pub async fn format_denominated_amount< RPC.vp().token().denomination(client, token).await, ) .unwrap_or_else(|t| { - display_line!(IO, "Error in querying for denomination: {t}"); + display_line!(StdIo, "Error in querying for denomination: {t}"); None }) .unwrap_or_else(|| { display_line!( - IO, + StdIo, "No denomination found for token: {token}, defaulting to zero \ decimal places" ); diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..3afb990422 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -24,14 +24,13 @@ use sha2::Digest; use zeroize::Zeroizing; use crate::display_line; +use super::masp::{ShieldedContext, ShieldedTransfer}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc_proto::google::protobuf::Any; use crate::ledger::parameters::storage as parameter_storage; use crate::proto::{MaspBuilder, Section, Tx}; use crate::sdk::error::{EncodingError, Error, TxError}; -use crate::sdk::masp::{ - make_asset_type, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; +use crate::sdk::masp::make_asset_type; use crate::sdk::rpc::{ format_denominated_amount, query_wasm_code_hash, validate_amount, }; @@ -55,6 +54,8 @@ use crate::types::transaction::governance::{ }; use crate::types::transaction::pos::InitValidator; use crate::types::transaction::Fee; +use crate::ledger::Namada; +use crate::sdk::args::SdkTypes; #[cfg(feature = "std")] /// Env. 
var specifying where to store signing test vectors @@ -85,7 +86,6 @@ pub struct SigningTxData { pub async fn find_pk< C: crate::ledger::queries::Client + Sync, U: WalletUtils, - IO: Io, >( client: &C, wallet: &mut Wallet, @@ -95,7 +95,7 @@ pub async fn find_pk< match addr { Address::Established(_) => { display_line!( - IO, + StdIo, "Looking-up public key of {} from the ledger...", addr.encode() ); @@ -153,14 +153,9 @@ pub fn find_key_by_pk( /// signer. Return the given signing key or public key of the given signer if /// possible. If no explicit signer given, use the `default`. If no `default` /// is given, an `Error` is returned. -pub async fn tx_signers< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - args: &args::Tx, +pub async fn tx_signers<'a>( + context: &mut impl Namada<'a>, + args: &args::Tx, default: Option
, ) -> Result, Error> { let signer = if !&args.signing_keys.is_empty() { @@ -179,7 +174,7 @@ pub async fn tx_signers< Some(signer) if signer == masp() => Ok(vec![masp_tx_key().ref_to()]), Some(signer) => Ok(vec![ - find_pk::(client, wallet, &signer, args.password.clone()) + find_pk(context.client, context.wallet, &signer, args.password.clone()) .await?, ]), None => other_err( @@ -242,27 +237,21 @@ pub fn sign_tx( /// Return the necessary data regarding an account to be able to generate a /// multisignature section -pub async fn aux_signing_data< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - args: &args::Tx, +pub async fn aux_signing_data<'a>( + context: &mut impl Namada<'a>, + args: &args::Tx, owner: Option
, default_signer: Option
, ) -> Result { let public_keys = if owner.is_some() || args.wrapper_fee_payer.is_none() { - tx_signers::(client, wallet, args, default_signer.clone()) - .await? + tx_signers(context, args, default_signer.clone()).await? } else { vec![] }; let (account_public_keys_map, threshold) = match &owner { Some(owner @ Address::Established(_)) => { - let account = rpc::get_account_info::(client, owner).await?; + let account = rpc::get_account_info(context.client, owner).await?; if let Some(account) = account { (Some(account.public_keys_map), account.threshold) } else { @@ -282,7 +271,7 @@ pub async fn aux_signing_data< }; let fee_payer = if args.disposable_signing_key { - wallet.generate_disposable_signing_key().to_public() + context.wallet.generate_disposable_signing_key().to_public() } else { match &args.wrapper_fee_payer { Some(keypair) => keypair.to_public(), @@ -322,15 +311,10 @@ pub struct TxSourcePostBalance { /// wrapper and its payload which is needed for monitoring its /// progress on chain. 
#[allow(clippy::too_many_arguments)] -pub async fn wrap_tx< - C: crate::sdk::queries::Client + Sync, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - shielded: &mut ShieldedContext, +pub async fn wrap_tx<'a, N: Namada<'a>>( + context: &mut N, tx: &mut Tx, - args: &args::Tx, + args: &args::Tx, tx_source_balance: Option, epoch: Epoch, fee_payer: common::PublicKey, @@ -339,9 +323,9 @@ pub async fn wrap_tx< // Validate fee amount and token let gas_cost_key = parameter_storage::get_gas_cost_key(); let minimum_fee = match rpc::query_storage_value::< - C, + _, BTreeMap, - >(client, &gas_cost_key) + >(context.client, &gas_cost_key) .await .and_then(|map| { map.get(&args.fee_token) @@ -364,14 +348,10 @@ pub async fn wrap_tx< }; let fee_amount = match args.fee_amount { Some(amount) => { - let validated_fee_amount = validate_amount::<_, IO>( - client, - amount, - &args.fee_token, - args.force, - ) - .await - .expect("Expected to be able to validate fee"); + let validated_fee_amount = + validate_amount(context.client, amount, &args.fee_token, args.force) + .await + .expect("Expected to be able to validate fee"); let amount = Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); @@ -381,7 +361,7 @@ pub async fn wrap_tx< } else if !args.force { // Update the fee amount if it's not enough display_line!( - IO, + StdIo, "The provided gas price {} is less than the minimum \ amount required {}, changing it to match the minimum", amount.to_string_native(), @@ -405,7 +385,7 @@ pub async fn wrap_tx< let balance_key = token::balance_key(&args.fee_token, &fee_payer_address); - rpc::query_storage_value::(client, &balance_key) + rpc::query_storage_value::<_, token::Amount>(context.client, &balance_key) .await .unwrap_or_default() } @@ -441,8 +421,7 @@ pub async fn wrap_tx< tx_code_path: PathBuf::new(), }; - match shielded - .gen_shielded_transfer::<_, IO>(client, transfer_args) + match ShieldedContext::::gen_shielded_transfer(context, &transfer_args) .await { Ok(Some(ShieldedTransfer 
{ @@ -471,8 +450,8 @@ pub async fn wrap_tx< let descriptions_limit_key= parameter_storage::get_fee_unshielding_descriptions_limit_key(); let descriptions_limit = - rpc::query_storage_value::( - client, + rpc::query_storage_value::<_, u64>( + context.client, &descriptions_limit_key, ) .await @@ -519,15 +498,15 @@ pub async fn wrap_tx< } else { let token_addr = args.fee_token.clone(); if !args.force { - let fee_amount = format_denominated_amount::<_, IO>( - client, + let fee_amount = format_denominated_amount( + context.client, &token_addr, total_fee, ) .await; - let balance = format_denominated_amount::<_, IO>( - client, + let balance = format_denominated_amount( + context.client, &token_addr, updated_balance, ) @@ -546,7 +525,7 @@ pub async fn wrap_tx< _ => { if args.fee_unshield.is_some() { display_line!( - IO, + StdIo, "Enough transparent balance to pay fees: the fee \ unshielding spending key will be ignored" ); @@ -616,7 +595,6 @@ fn make_ledger_amount_addr( /// type async fn make_ledger_amount_asset< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, tokens: &HashMap, @@ -629,7 +607,7 @@ async fn make_ledger_amount_asset< if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees let formatted_amt = - format_denominated_amount::<_, IO>(client, token, amount.into()) + format_denominated_amount(client, token, amount.into()) .await; if let Some(token) = tokens.get(token) { output @@ -716,7 +694,6 @@ fn format_outputs(output: &mut Vec) { /// transactions pub async fn make_ledger_masp_endpoints< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, tokens: &HashMap, @@ -740,7 +717,7 @@ pub async fn make_ledger_masp_endpoints< for sapling_input in builder.builder.sapling_inputs() { let vk = ExtendedViewingKey::from(*sapling_input.key()); output.push(format!("Sender : {}", vk)); - make_ledger_amount_asset::<_, IO>( + make_ledger_amount_asset( client, tokens, output, @@ -767,7 
+744,7 @@ pub async fn make_ledger_masp_endpoints< for sapling_output in builder.builder.sapling_outputs() { let pa = PaymentAddress::from(sapling_output.address()); output.push(format!("Destination : {}", pa)); - make_ledger_amount_asset::<_, IO>( + make_ledger_amount_asset( client, tokens, output, @@ -792,13 +769,8 @@ pub async fn make_ledger_masp_endpoints< /// Internal method used to generate transaction test vectors #[cfg(feature = "std")] -pub async fn generate_test_vector< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn generate_test_vector<'a>( + context: &mut impl Namada<'a>, tx: &Tx, ) -> Result<(), Error> { use std::env; @@ -810,8 +782,7 @@ pub async fn generate_test_vector< // Contract the large data blobs in the transaction tx.wallet_filter(); // Convert the transaction to Ledger format - let decoding = - to_ledger_vector::<_, _, IO>(client, wallet, &tx).await?; + let decoding = to_ledger_vector(context, &tx).await?; let output = serde_json::to_string(&decoding) .map_err(|e| Error::from(EncodingError::Serde(e.to_string())))?; // Record the transaction at the identified path @@ -849,46 +820,36 @@ pub async fn generate_test_vector< /// Converts the given transaction to the form that is displayed on the Ledger /// device -pub async fn to_ledger_vector< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn to_ledger_vector<'a>( + context: &mut impl Namada<'a>, tx: &Tx, ) -> Result { let init_account_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_ACCOUNT_WASM).await?; + query_wasm_code_hash(context.client, TX_INIT_ACCOUNT_WASM).await?; let init_validator_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_VALIDATOR_WASM).await?; + query_wasm_code_hash(context.client, TX_INIT_VALIDATOR_WASM).await?; let init_proposal_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_PROPOSAL).await?; + 
query_wasm_code_hash(context.client, TX_INIT_PROPOSAL).await?; let vote_proposal_hash = - query_wasm_code_hash::<_, IO>(client, TX_VOTE_PROPOSAL).await?; - let reveal_pk_hash = - query_wasm_code_hash::<_, IO>(client, TX_REVEAL_PK).await?; + query_wasm_code_hash(context.client, TX_VOTE_PROPOSAL).await?; + let reveal_pk_hash = query_wasm_code_hash(context.client, TX_REVEAL_PK).await?; let update_account_hash = - query_wasm_code_hash::<_, IO>(client, TX_UPDATE_ACCOUNT_WASM).await?; - let transfer_hash = - query_wasm_code_hash::<_, IO>(client, TX_TRANSFER_WASM).await?; - let ibc_hash = query_wasm_code_hash::<_, IO>(client, TX_IBC_WASM).await?; - let bond_hash = query_wasm_code_hash::<_, IO>(client, TX_BOND_WASM).await?; - let unbond_hash = - query_wasm_code_hash::<_, IO>(client, TX_UNBOND_WASM).await?; - let withdraw_hash = - query_wasm_code_hash::<_, IO>(client, TX_WITHDRAW_WASM).await?; + query_wasm_code_hash(context.client, TX_UPDATE_ACCOUNT_WASM).await?; + let transfer_hash = query_wasm_code_hash(context.client, TX_TRANSFER_WASM).await?; + let ibc_hash = query_wasm_code_hash(context.client, TX_IBC_WASM).await?; + let bond_hash = query_wasm_code_hash(context.client, TX_BOND_WASM).await?; + let unbond_hash = query_wasm_code_hash(context.client, TX_UNBOND_WASM).await?; + let withdraw_hash = query_wasm_code_hash(context.client, TX_WITHDRAW_WASM).await?; let change_commission_hash = - query_wasm_code_hash::<_, IO>(client, TX_CHANGE_COMMISSION_WASM) - .await?; - let user_hash = query_wasm_code_hash::<_, IO>(client, VP_USER_WASM).await?; + query_wasm_code_hash(context.client, TX_CHANGE_COMMISSION_WASM).await?; + let user_hash = query_wasm_code_hash(context.client, VP_USER_WASM).await?; // To facilitate lookups of human-readable token names - let tokens: HashMap = wallet + let tokens: HashMap = context.wallet .get_addresses_with_vp_type(AddressVpType::Token) .into_iter() .map(|addr| { - let alias = match wallet.find_alias(&addr) { + let alias = match 
context.wallet.find_alias(&addr) { Some(alias) => alias.to_string(), None => addr.to_string(), }; @@ -1174,8 +1135,8 @@ pub async fn to_ledger_vector< tv.name = "Transfer 0".to_string(); tv.output.push("Type : Transfer".to_string()); - make_ledger_masp_endpoints::<_, IO>( - client, + make_ledger_masp_endpoints( + context.client, &tokens, &mut tv.output, &transfer, @@ -1183,8 +1144,8 @@ pub async fn to_ledger_vector< &asset_types, ) .await; - make_ledger_masp_endpoints::<_, IO>( - client, + make_ledger_masp_endpoints( + context.client, &tokens, &mut tv.output_expert, &transfer, @@ -1356,14 +1317,14 @@ pub async fn to_ledger_vector< if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); - let gas_limit = format_denominated_amount::<_, IO>( - client, + let gas_limit = format_denominated_amount( + context.client, &gas_token, Amount::from(wrapper.gas_limit), ) .await; - let fee_amount_per_gas_unit = format_denominated_amount::<_, IO>( - client, + let fee_amount_per_gas_unit = format_denominated_amount( + context.client, &gas_token, wrapper.fee.amount_per_gas_unit, ) diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 5885ebda44..2ca4879bfe 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -43,21 +43,21 @@ use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; use crate::ledger::ibc::storage::ibc_denom_key; use crate::sdk::signing::SigningTxData; -use crate::proto::{MaspBuilder, Tx}; -use crate::sdk::args::{self, InputAmount}; -use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; use crate::sdk::masp::TransferErr::Build; -use crate::sdk::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; +use crate::sdk::masp::{ShieldedContext, ShieldedTransfer}; use crate::sdk::rpc::{ - self, format_denominated_amount, query_wasm_code_hash, validate_amount, - TxBroadcastData, TxResponse, + self, format_denominated_amount, validate_amount, TxBroadcastData, + TxResponse, query_wasm_code_hash 
}; -use crate::sdk::signing::{self, TxSourcePostBalance}; use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::proto::{MaspBuilder, Tx}; +use crate::sdk::args::{self, InputAmount}; +use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::sdk::signing::{self, TxSourcePostBalance}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; use crate::types::control_flow::{time, ProceedOrElse}; -use crate::types::io::Io; +use crate::types::io::{Io, StdIo}; use crate::types::key::*; use crate::types::masp::TransferTarget; use crate::types::storage::Epoch; @@ -66,11 +66,14 @@ use crate::types::transaction::account::{InitAccount, UpdateAccount}; use crate::types::transaction::{pos, TxType}; use crate::types::{storage, token}; use crate::{display_line, edisplay_line, vm}; +use crate::ledger::Namada; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; /// Initialize validator transaction WASM path pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; +/// Unjail validator transaction WASM path +pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; /// Initialize proposal transaction WASM path pub const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; /// Vote transaction WASM path @@ -91,9 +94,16 @@ pub const TX_BOND_WASM: &str = "tx_bond.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; /// Withdraw WASM path pub const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +/// Bridge pool WASM path +pub const TX_BRIDGE_POOL_WASM: &str = "tx_bridge_pool.wasm"; /// Change commission WASM path pub const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; +/// Resign steward WASM path +pub const TX_RESIGN_STEWARD: &str = "tx_resign_steward.wasm"; +/// Update steward commission WASM path +pub const TX_UPDATE_STEWARD_COMMISSION: &str = + "tx_update_steward_commission.wasm"; /// 
Default timeout in seconds for requests to the `/accepted` /// and `/applied` ABCI query endpoints. @@ -148,26 +158,18 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { /// Prepare a transaction for signing and submission by adding a wrapper header /// to it. #[allow(clippy::too_many_arguments)] -pub async fn prepare_tx< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn prepare_tx<'a>( + context: &mut impl Namada<'a>, args: &args::Tx, tx: &mut Tx, fee_payer: common::PublicKey, tx_source_balance: Option, ) -> Result> { if !args.dry_run { - let epoch = rpc::query_epoch(client).await?; + let epoch = rpc::query_epoch(context.client).await?; - signing::wrap_tx::<_, _, IO>( - client, - shielded, + signing::wrap_tx( + context, tx, args, tx_source_balance, @@ -185,7 +187,6 @@ pub async fn prepare_tx< pub async fn process_tx< C: crate::sdk::queries::Client + Sync, U: WalletUtils, - IO: Io, >( client: &C, wallet: &mut Wallet, @@ -203,7 +204,7 @@ pub async fn process_tx< // println!("HTTP request body: {}", request_body); if args.dry_run || args.dry_run_wrapper { - expect_dry_broadcast::<_, IO>(TxBroadcastData::DryRun(tx), client).await + expect_dry_broadcast::<_, StdIo>(TxBroadcastData::DryRun(tx), client).await } else { // We use this to determine when the wrapper tx makes it on-chain let wrapper_hash = tx.header_hash().to_string(); @@ -223,13 +224,13 @@ pub async fn process_tx< // of masp epoch Either broadcast or submit transaction and // collect result into sum type if args.broadcast_only { - broadcast_tx::<_, IO>(client, &to_broadcast) + broadcast_tx::<_, StdIo>(client, &to_broadcast) .await .map(ProcessTxResponse::Broadcast) } else { - match submit_tx::<_, IO>(client, to_broadcast).await { + match submit_tx::<_, StdIo>(client, to_broadcast).await { Ok(x) => { - save_initialized_accounts::( + save_initialized_accounts::( wallet, args, 
x.initialized_accounts.clone(), @@ -265,37 +266,25 @@ pub async fn has_revealed_pk( } /// Submit transaction to reveal the given public key -pub async fn build_reveal_pk< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_reveal_pk<'a>( + context: &mut impl Namada<'a>, args: &args::Tx, - address: &Address, public_key: &common::PublicKey, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - display_line!( - IO, - "Submitting a tx to reveal the public key for address {address}..." - ); +) -> Result<(Tx, SigningTxData, Option)> { + let signing_data = + signing::aux_signing_data(context, args, None, None) + .await?; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + build( + context, args, args.tx_reveal_code_path.clone(), public_key, do_nothing, - fee_payer, + &signing_data.fee_payer, None, ) - .await + .await.map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Broadcast a transaction to be included in the blockchain and checks that @@ -516,51 +505,43 @@ pub async fn save_initialized_accounts( } /// Submit validator comission rate change -pub async fn build_validator_commission_change< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_validator_commission_change<'a>( + context: &mut impl Namada<'a>, args::CommissionRateChange { tx: tx_args, validator, rate, tx_code_path, - }: args::CommissionRateChange, + }: &args::CommissionRateChange, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(validator.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(validator.clone()), default_signer, ) .await?; - let epoch = rpc::query_epoch(client).await?; + 
let epoch = rpc::query_epoch(context.client).await?; - let params: PosParams = rpc::get_pos_params(client).await?; + let params: PosParams = rpc::get_pos_params(context.client).await?; let validator = validator.clone(); - if rpc::is_validator(client, &validator).await? { - if rate < Dec::zero() || rate > Dec::one() { + if rpc::is_validator(context.client, &validator).await? { + if *rate < Dec::zero() || *rate > Dec::one() { edisplay_line!( - IO, + StdIo, "Invalid new commission rate, received {}", rate ); - return Err(Error::from(TxError::InvalidCommissionRate(rate))); + return Err(Error::from(TxError::InvalidCommissionRate(*rate))); } let pipeline_epoch_minus_one = epoch + params.pipeline_len - 1; match rpc::query_commission_rate( - client, + context.client, &validator, Some(pipeline_epoch_minus_one), ) @@ -574,27 +555,27 @@ pub async fn build_validator_commission_change< > max_commission_change_per_epoch { edisplay_line!( - IO, + StdIo, "New rate is too large of a change with respect to \ the predecessor epoch in which the rate will take \ effect." 
); if !tx_args.force { return Err(Error::from( - TxError::InvalidCommissionRate(rate), + TxError::InvalidCommissionRate(*rate), )); } } } None => { - edisplay_line!(IO, "Error retrieving from storage"); + edisplay_line!(StdIo, "Error retrieving from storage"); if !tx_args.force { return Err(Error::from(TxError::Retrieval)); } } } } else { - edisplay_line!(IO, "The given address {validator} is not a validator."); + edisplay_line!(StdIo, "The given address {validator} is not a validator."); if !tx_args.force { return Err(Error::from(TxError::InvalidValidatorAddress( validator, @@ -604,15 +585,13 @@ pub async fn build_validator_commission_change< let data = pos::CommissionChange { validator: validator.clone(), - new_rate: rate, + new_rate: *rate, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -622,34 +601,26 @@ pub async fn build_validator_commission_change< } /// Craft transaction to update a steward commission -pub async fn build_update_steward_commission< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_update_steward_commission<'a>( + context: &mut impl Namada<'a>, args::UpdateStewardCommission { tx: tx_args, steward, commission, tx_code_path, - }: args::UpdateStewardCommission, + }: &args::UpdateStewardCommission, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(steward.clone()), default_signer, ) .await?; - if !rpc::is_steward(client, &steward).await && !tx_args.force { - edisplay_line!(IO, "The given address {} is not a steward.", &steward); + if !rpc::is_steward(context.client, 
steward).await && !tx_args.force { + edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -658,7 +629,7 @@ pub async fn build_update_steward_commission< if !commission.is_valid() && !tx_args.force { edisplay_line!( - IO, + StdIo, "The sum of all percentage must not be greater than 1." ); return Err(Error::from(TxError::InvalidStewardCommission( @@ -671,12 +642,10 @@ pub async fn build_update_steward_commission< commission: commission.reward_distribution, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -686,43 +655,33 @@ pub async fn build_update_steward_commission< } /// Craft transaction to resign as a steward -pub async fn build_resign_steward< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_resign_steward<'a>( + context: &mut impl Namada<'a>, args::ResignSteward { tx: tx_args, steward, tx_code_path, - }: args::ResignSteward, + }: &args::ResignSteward, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(steward.clone()), default_signer, ) .await?; - if !rpc::is_steward(client, &steward).await && !tx_args.force { - edisplay_line!(IO, "The given address {} is not a steward.", &steward); + if !rpc::is_steward(context.client, steward).await && !tx_args.force { + edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, - 
steward, + build( + context, + tx_args, + tx_code_path.clone(), + steward.clone(), do_nothing, &signing_data.fee_payer, None, @@ -731,34 +690,26 @@ pub async fn build_resign_steward< } /// Submit transaction to unjail a jailed validator -pub async fn build_unjail_validator< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_unjail_validator<'a>( + context: &mut impl Namada<'a>, args::TxUnjailValidator { tx: tx_args, validator, tx_code_path, - }: args::TxUnjailValidator, + }: &args::TxUnjailValidator, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(validator.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(validator.clone()), default_signer, ) .await?; - if !rpc::is_validator(client, &validator).await? { + if !rpc::is_validator(context.client, validator).await? { edisplay_line!( - IO, + StdIo, "The given address {} is not a validator.", &validator ); @@ -769,12 +720,12 @@ pub async fn build_unjail_validator< } } - let params: PosParams = rpc::get_pos_params(client).await?; - let current_epoch = rpc::query_epoch(client).await?; + let params: PosParams = rpc::get_pos_params(context.client).await?; + let current_epoch = rpc::query_epoch(context.client).await?; let pipeline_epoch = current_epoch + params.pipeline_len; let validator_state_at_pipeline = - rpc::get_validator_state(client, &validator, Some(pipeline_epoch)) + rpc::get_validator_state(context.client, validator, Some(pipeline_epoch)) .await? 
.ok_or_else(|| { Error::from(TxError::Other( @@ -783,7 +734,7 @@ pub async fn build_unjail_validator< })?; if validator_state_at_pipeline != ValidatorState::Jailed { edisplay_line!( - IO, + StdIo, "The given validator address {} is not jailed at the pipeline \ epoch when it would be restored to one of the validator sets.", &validator @@ -796,9 +747,9 @@ pub async fn build_unjail_validator< } let last_slash_epoch_key = - crate::ledger::pos::validator_last_slash_key(&validator); + crate::ledger::pos::validator_last_slash_key(validator); let last_slash_epoch = - rpc::query_storage_value::(client, &last_slash_epoch_key) + rpc::query_storage_value::<_, Epoch>(context.client, &last_slash_epoch_key) .await; match last_slash_epoch { Ok(last_slash_epoch) => { @@ -806,7 +757,7 @@ pub async fn build_unjail_validator< last_slash_epoch + params.slash_processing_epoch_offset(); if current_epoch < eligible_epoch { edisplay_line!( - IO, + StdIo, "The given validator address {} is currently frozen and \ not yet eligible to be unjailed.", &validator @@ -832,13 +783,11 @@ pub async fn build_unjail_validator< Err(err) => return Err(err), } - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, - validator, + build( + context, + tx_args, + tx_code_path.clone(), + validator.clone(), do_nothing, &signing_data.fee_payer, None, @@ -847,48 +796,37 @@ pub async fn build_unjail_validator< } /// Submit transaction to withdraw an unbond -pub async fn build_withdraw< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_withdraw<'a>( + context: &mut impl Namada<'a>, args::Withdraw { tx: tx_args, validator, source, tx_code_path, - }: args::Withdraw, + }: &args::Withdraw, ) -> Result<(Tx, SigningTxData, Option)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); - 
let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(default_address), default_signer, ) .await?; - let epoch = rpc::query_epoch(client).await?; + let epoch = rpc::query_epoch(context.client).await?; - let validator = known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, - ) - .await?; + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context.client) + .await?; let source = source.clone(); // Check the source's current unbond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); let tokens = rpc::query_withdrawable_tokens( - client, + context.client, &bond_source, &validator, Some(epoch), @@ -897,33 +835,30 @@ pub async fn build_withdraw< if tokens.is_zero() { edisplay_line!( - IO, + StdIo, "There are no unbonded bonds ready to withdraw in the current \ epoch {}.", epoch ); - rpc::query_and_print_unbonds::<_, IO>(client, &bond_source, &validator) - .await?; + rpc::query_and_print_unbonds(context.client, &bond_source, &validator).await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } } else { display_line!( - IO, + StdIo, "Found {} tokens that can be withdrawn.", tokens.to_string_native() ); - display_line!(IO, "Submitting transaction to withdraw them..."); + display_line!(StdIo, "Submitting transaction to withdraw them..."); } let data = pos::Withdraw { validator, source }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -933,29 +868,21 @@ pub async fn build_withdraw< } /// Submit a transaction to unbond -pub async fn build_unbond< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn 
build_unbond<'a>( + context: &mut impl Namada<'a>, args::Unbond { tx: tx_args, validator, amount, source, tx_code_path, - }: args::Unbond, + }: &args::Unbond, ) -> Result<(Tx, SigningTxData, Option, Option<(Epoch, token::Amount)>)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(default_address), default_signer, ) @@ -966,24 +893,20 @@ pub async fn build_unbond< let bond_source = source.clone().unwrap_or_else(|| validator.clone()); if !tx_args.force { - known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, - ) - .await?; + known_validator_or_err(validator.clone(), tx_args.force, context.client) + .await?; let bond_amount = - rpc::query_bond(client, &bond_source, &validator, None).await?; + rpc::query_bond(context.client, &bond_source, validator, None).await?; display_line!( - IO, + StdIo, "Bond amount available for unbonding: {} NAM", bond_amount.to_string_native() ); - if amount > bond_amount { + if *amount > bond_amount { edisplay_line!( - IO, + StdIo, "The total bonds of the source {} is lower than the amount to \ be unbonded. 
Amount to unbond is {} and the total bonds is \ {}.", @@ -1003,7 +926,7 @@ pub async fn build_unbond< // Query the unbonds before submitting the tx let unbonds = - rpc::query_unbond_with_slashing(client, &bond_source, &validator) + rpc::query_unbond_with_slashing(context.client, &bond_source, validator) .await?; let mut withdrawable = BTreeMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { @@ -1014,16 +937,14 @@ pub async fn build_unbond< let data = pos::Unbond { validator: validator.clone(), - amount, + amount: *amount, source: source.clone(), }; - let (tx, epoch) = build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + let (tx, epoch) = build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -1102,15 +1023,8 @@ pub async fn query_unbonds( } /// Submit a transaction to bond -pub async fn build_bond< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_bond<'a>( + context: &mut impl Namada<'a>, args::Bond { tx: tx_args, validator, @@ -1118,65 +1032,60 @@ pub async fn build_bond< source, native_token, tx_code_path, - }: args::Bond, + }: &args::Bond, ) -> Result<(Tx, SigningTxData, Option)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(default_address.clone()), default_signer, ) .await?; let validator = - known_validator_or_err::<_, IO>(validator.clone(), tx_args.force, client) + known_validator_or_err(validator.clone(), tx_args.force, context.client) .await?; // Check that the source address exists on chain let source = match source.clone() { - Some(source) => { - source_exists_or_err::<_, 
IO>(source, tx_args.force, client) - .await - .map(Some) - } + Some(source) => source_exists_or_err(source, tx_args.force, context.client) + .await + .map(Some), None => Ok(source.clone()), }?; // Check bond's source (source for delegation or validator for self-bonds) // balance let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&native_token, bond_source); + let balance_key = token::balance_key(native_token, bond_source); // TODO Should we state the same error message for the native token? - let post_balance = check_balance_too_low_err::<_, IO>( - &native_token, + let post_balance = check_balance_too_low_err( + native_token, bond_source, - amount, + *amount, balance_key, tx_args.force, - client, + context.client, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { post_balance, source: bond_source.clone(), - token: native_token, + token: native_token.clone(), }); let data = pos::Bond { validator, - amount, + amount: *amount, source, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -1186,15 +1095,8 @@ pub async fn build_bond< } /// Build a default proposal governance -pub async fn build_default_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_default_proposal<'a>( + context: &mut impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1203,14 +1105,13 @@ pub async fn build_default_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: DefaultProposal, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx, + let 
signing_data = signing::aux_signing_data( + context, + tx, Some(proposal.proposal.author.clone()), default_signer, ) @@ -1233,12 +1134,10 @@ pub async fn build_default_proposal< }; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, push_data, &signing_data.fee_payer, @@ -1248,15 +1147,8 @@ pub async fn build_default_proposal< } /// Build a proposal vote -pub async fn build_vote_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_vote_proposal<'a>( + context: &mut impl Namada<'a>, args::VoteProposal { tx, proposal_id, @@ -1265,27 +1157,26 @@ pub async fn build_vote_proposal< is_offline: _, proposal_data: _, tx_code_path, - }: args::VoteProposal, + }: &args::VoteProposal, epoch: Epoch, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(voter.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx, + let signing_data = signing::aux_signing_data( + context, + tx, Some(voter.clone()), default_signer.clone(), ) .await?; - let proposal_vote = ProposalVote::try_from(vote) + let proposal_vote = ProposalVote::try_from(vote.clone()) .map_err(|_| TxError::InvalidProposalVote)?; let proposal_id = proposal_id.ok_or_else(|| { Error::Other("Proposal id must be defined.".to_string()) })?; let proposal = if let Some(proposal) = - rpc::query_proposal_by_id(client, proposal_id).await? + rpc::query_proposal_by_id(context.client, proposal_id).await? 
{ proposal } else { @@ -1300,7 +1191,7 @@ pub async fn build_vote_proposal< )) })?; - let is_validator = rpc::is_validator(client, &voter).await?; + let is_validator = rpc::is_validator(context.client, voter).await?; if !proposal.can_be_voted(epoch, is_validator) { if tx.force { @@ -1313,8 +1204,8 @@ pub async fn build_vote_proposal< } let delegations = rpc::get_delegators_delegation_at( - client, - &voter, + context.client, + voter, proposal.voting_start_epoch, ) .await? @@ -1329,12 +1220,10 @@ pub async fn build_vote_proposal< delegations, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), data, do_nothing, &signing_data.fee_payer, @@ -1344,15 +1233,8 @@ pub async fn build_vote_proposal< } /// Build a pgf funding proposal governance -pub async fn build_pgf_funding_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_pgf_funding_proposal<'a>( + context: &mut impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1361,14 +1243,13 @@ pub async fn build_pgf_funding_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: PgfFundingProposal, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx, + let signing_data = signing::aux_signing_data( + context, + tx, Some(proposal.proposal.author.clone()), default_signer, ) @@ -1383,12 +1264,10 @@ pub async fn build_pgf_funding_proposal< data.content = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, add_section, &signing_data.fee_payer, @@ -1398,15 +1277,8 @@ 
pub async fn build_pgf_funding_proposal< } /// Build a pgf funding proposal governance -pub async fn build_pgf_stewards_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_pgf_stewards_proposal<'a>( + context: &mut impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1415,14 +1287,13 @@ pub async fn build_pgf_stewards_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: PgfStewardProposal, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx, + let signing_data = signing::aux_signing_data( + context, + tx, Some(proposal.proposal.author.clone()), default_signer, ) @@ -1438,12 +1309,10 @@ pub async fn build_pgf_stewards_proposal< Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, add_section, &signing_data.fee_payer, @@ -1453,43 +1322,29 @@ pub async fn build_pgf_stewards_proposal< } /// Submit an IBC transfer -pub async fn build_ibc_transfer< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, - args: args::TxIbcTransfer, +pub async fn build_ibc_transfer<'a>( + context: &mut impl Namada<'a>, + args: &args::TxIbcTransfer, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(args.source.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, + let signing_data = signing::aux_signing_data( + context, &args.tx, Some(args.source.clone()), default_signer, ) .await?; // Check that the source address exists on chain - let source = 
source_exists_or_err::<_, IO>( - args.source.clone(), - args.tx.force, - client, - ) - .await?; + let source = + source_exists_or_err(args.source.clone(), args.tx.force, context.client) + .await?; // We cannot check the receiver // validate the amount given - let validated_amount = validate_amount::<_, IO>( - client, - args.amount, - &args.token, - args.tx.force, - ) - .await?; + let validated_amount = + validate_amount(context.client, args.amount, &args.token, args.tx.force) + .await + .expect("expected to validate amount"); if validated_amount.canonical().denom.0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", @@ -1500,13 +1355,13 @@ pub async fn build_ibc_transfer< // Check source balance let balance_key = token::balance_key(&args.token, &source); - let post_balance = check_balance_too_low_err::<_, IO>( + let post_balance = check_balance_too_low_err( &args.token, &source, validated_amount.amount, balance_key, args.tx.force, - client, + context.client, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1515,17 +1370,15 @@ pub async fn build_ibc_transfer< token: args.token.clone(), }); - let tx_code_hash = query_wasm_code_hash::<_, IO>( - client, - args.tx_code_path.to_str().unwrap(), - ) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = + query_wasm_code_hash(context.client, args.tx_code_path.to_str().unwrap()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; let ibc_denom = match &args.token { Address::Internal(InternalAddress::IbcToken(hash)) => { let ibc_denom_key = ibc_denom_key(hash); - rpc::query_storage_value::(client, &ibc_denom_key) + rpc::query_storage_value::<_, String>(context.client, &ibc_denom_key) .await .map_err(|_e| TxError::TokenDoesNotExist(args.token.clone()))? 
} @@ -1539,8 +1392,8 @@ pub async fn build_ibc_transfer< let packet_data = PacketData { token, sender: source.to_string().into(), - receiver: args.receiver.into(), - memo: args.memo.unwrap_or_default().into(), + receiver: args.receiver.clone().into(), + memo: args.memo.clone().unwrap_or_default().into(), }; // this height should be that of the destination chain, not this chain @@ -1571,8 +1424,8 @@ pub async fn build_ibc_transfer< }; let msg = MsgTransfer { - port_id_on_a: args.port_id, - chan_id_on_a: args.channel_id, + port_id_on_a: args.port_id.clone(), + chan_id_on_a: args.channel_id.clone(), packet_data, timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, @@ -1588,10 +1441,8 @@ pub async fn build_ibc_transfer< tx.add_code_from_hash(tx_code_hash) .add_serialized_data(data); - let epoch = prepare_tx::( - client, - wallet, - shielded, + let epoch = prepare_tx( + context, &args.tx, &mut tx, signing_data.fee_payer.clone(), @@ -1604,10 +1455,8 @@ pub async fn build_ibc_transfer< /// Abstraction for helping build transactions #[allow(clippy::too_many_arguments)] -pub async fn build( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build<'a, F, D>( + context: &mut impl Namada<'a>, tx_args: &crate::sdk::args::Tx, path: PathBuf, data: D, @@ -1618,14 +1467,9 @@ pub async fn build( where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, { - build_pow_flag::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + build_pow_flag( + context, tx_args, path, data, @@ -1637,17 +1481,8 @@ where } #[allow(clippy::too_many_arguments)] -async fn build_pow_flag< - C: crate::ledger::queries::Client + Sync, - U, - V, - F, - D, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +async fn build_pow_flag<'a, F, D>( + context: &mut impl Namada<'a>, tx_args: &crate::sdk::args::Tx, path: PathBuf, mut data: D, @@ -1658,26 +1493,21 @@ async 
fn build_pow_flag< where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, - U: WalletUtils, - V: ShieldedUtils, { let chain_id = tx_args.chain_id.clone().unwrap(); let mut tx_builder = Tx::new(chain_id, tx_args.expiration); - let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = query_wasm_code_hash(context.client, path.to_string_lossy()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; on_tx(&mut tx_builder, &mut data)?; tx_builder.add_code_from_hash(tx_code_hash).add_data(data); - let epoch = prepare_tx::( - client, - wallet, - shielded, + let epoch = prepare_tx( + context, tx_args, &mut tx_builder, gas_payer.clone(), @@ -1689,17 +1519,14 @@ where /// Try to decode the given asset type and add its decoding to the supplied set. /// Returns true only if a new decoding has been added to the given set. -async fn add_asset_type< - C: crate::sdk::queries::Client + Sync, - U: ShieldedUtils, ->( +async fn add_asset_type<'a>( asset_types: &mut HashSet<(Address, MaspDenom, Epoch)>, - shielded: &mut ShieldedContext, - client: &C, + context: &mut impl Namada<'a>, asset_type: AssetType, ) -> bool { + let context = &mut **context; if let Some(asset_type) = - shielded.decode_asset_type(client, asset_type).await + context.shielded.decode_asset_type(context.client, asset_type).await { asset_types.insert(asset_type) } else { @@ -1710,42 +1537,33 @@ async fn add_asset_type< /// Collect the asset types used in the given Builder and decode them. This /// function provides the data necessary for offline wallets to present asset /// type information. 
-async fn used_asset_types< - C: crate::sdk::queries::Client + Sync, - U: ShieldedUtils, - P, - R, - K, - N, ->( - shielded: &mut ShieldedContext, - client: &C, +async fn used_asset_types<'a, P, R, K, N>( + context: &mut impl Namada<'a>, builder: &Builder, ) -> std::result::Result, RpcError> { let mut asset_types = HashSet::new(); // Collect all the asset types used in the Sapling inputs for input in builder.sapling_inputs() { - add_asset_type(&mut asset_types, shielded, client, input.asset_type()) + add_asset_type(&mut asset_types, context, input.asset_type()) .await; } // Collect all the asset types used in the transparent inputs for input in builder.transparent_inputs() { add_asset_type( &mut asset_types, - shielded, - client, + context, input.coin().asset_type(), ) .await; } // Collect all the asset types used in the Sapling outputs for output in builder.sapling_outputs() { - add_asset_type(&mut asset_types, shielded, client, output.asset_type()) + add_asset_type(&mut asset_types, context, output.asset_type()) .await; } // Collect all the asset types used in the transparent outputs for output in builder.transparent_outputs() { - add_asset_type(&mut asset_types, shielded, client, output.asset_type()) + add_asset_type(&mut asset_types, context, output.asset_type()) .await; } // Collect all the asset types used in the Sapling converts @@ -1753,7 +1571,7 @@ async fn used_asset_types< for (asset_type, _) in I32Sum::from(output.conversion().clone()).components() { - add_asset_type(&mut asset_types, shielded, client, *asset_type) + add_asset_type(&mut asset_types, context, *asset_type) .await; } } @@ -1761,21 +1579,13 @@ async fn used_asset_types< } /// Submit an ordinary transfer -pub async fn build_transfer< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_transfer<'a, N: Namada<'a>>( + context: &mut N, mut args: args::TxTransfer, ) 
-> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(args.source.effective_address()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, + let signing_data = signing::aux_signing_data( + context, &args.tx, Some(args.source.effective_address()), default_signer, @@ -1787,27 +1597,24 @@ pub async fn build_transfer< let token = args.token.clone(); // Check that the source address exists on chain - source_exists_or_err::<_, IO>(source.clone(), args.tx.force, client) - .await?; + source_exists_or_err(source.clone(), args.tx.force, context.client).await?; // Check that the target address exists on chain - target_exists_or_err::<_, IO>(target.clone(), args.tx.force, client) - .await?; + target_exists_or_err(target.clone(), args.tx.force, context.client).await?; // Check source balance let balance_key = token::balance_key(&token, &source); // validate the amount given let validated_amount = - validate_amount::<_, IO>(client, args.amount, &token, args.tx.force) - .await?; + validate_amount(context.client, args.amount, &token, args.tx.force).await?; args.amount = InputAmount::Validated(validated_amount); - let post_balance = check_balance_too_low_err::( + let post_balance = check_balance_too_low_err( &token, &source, validated_amount.amount, balance_key, args.tx.force, - client, + context.client, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1835,9 +1642,7 @@ pub async fn build_transfer< }; // Construct the shielded part of the transaction, if any - let stx_result = shielded - .gen_shielded_transfer::<_, IO>(client, args.clone()) - .await; + let stx_result = ShieldedContext::::gen_shielded_transfer(context, &args).await; let shielded_parts = match stx_result { Ok(stx) => Ok(stx), @@ -1859,7 +1664,7 @@ pub async fn build_transfer< // Get the decoded asset types used in the transaction to give // offline wallet users more information let asset_types = - used_asset_types(shielded, client, &transfer.builder) + 
used_asset_types(context, &transfer.builder) .await .unwrap_or_default(); Some(asset_types) @@ -1905,12 +1710,10 @@ pub async fn build_transfer< }; Ok(()) }; - let (tx, unshielding_epoch) = build_pow_flag::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + let (tx, unshielding_epoch) = build_pow_flag( + context, &args.tx, - args.tx_code_path, + args.tx_code_path.clone(), transfer, add_shielded, &signing_data.fee_payer, @@ -1940,30 +1743,23 @@ pub async fn build_transfer< } /// Submit a transaction to initialize an account -pub async fn build_init_account< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_init_account<'a>( + context: &mut impl Namada<'a>, args::TxInitAccount { tx: tx_args, vp_code_path, tx_code_path, public_keys, threshold, - }: args::TxInitAccount, + }: &args::TxInitAccount, ) -> Result<(Tx, SigningTxData, Option)> { let signing_data = - signing::aux_signing_data::<_, _, IO>(client, wallet, &tx_args, None, None).await?; + signing::aux_signing_data(context, tx_args, None, None).await?; - let vp_code_hash = query_wasm_code_hash_buf::<_, IO>(client, &vp_code_path).await?; + let vp_code_hash = query_wasm_code_hash_buf(context.client, vp_code_path).await?; let threshold = match threshold { - Some(threshold) => threshold, + Some(threshold) => *threshold, None => { if public_keys.len() == 1 { 1u8 @@ -1974,7 +1770,7 @@ pub async fn build_init_account< }; let data = InitAccount { - public_keys, + public_keys: public_keys.clone(), // We will add the hash inside the add_code_hash function vp_code_hash: Hash::zero(), threshold, @@ -1985,12 +1781,10 @@ pub async fn build_init_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, add_code_hash, &signing_data.fee_payer, @@ 
-2000,15 +1794,8 @@ pub async fn build_init_account< } /// Submit a transaction to update a VP -pub async fn build_update_account< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_update_account<'a>( + context: &mut impl Namada<'a>, args::TxUpdateAccount { tx: tx_args, vp_code_path, @@ -2016,31 +1803,29 @@ pub async fn build_update_account< addr, public_keys, threshold, - }: args::TxUpdateAccount, + }: &args::TxUpdateAccount, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(addr.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(addr.clone()), default_signer, ) .await?; let addr = - if let Some(account) = rpc::get_account_info(client, &addr).await? { + if let Some(account) = rpc::get_account_info(context.client, addr).await? 
{ account.address } else if tx_args.force { - addr + addr.clone() } else { - return Err(Error::from(TxError::LocationDoesNotExist(addr))); + return Err(Error::from(TxError::LocationDoesNotExist(addr.clone()))); }; let vp_code_hash = match vp_code_path { Some(code_path) => { - let vp_hash = - query_wasm_code_hash_buf::<_, IO>(client, &code_path).await?; + let vp_hash = query_wasm_code_hash_buf(context.client, code_path).await?; Some(vp_hash) } None => None, @@ -2054,8 +1839,8 @@ pub async fn build_update_account< let data = UpdateAccount { addr, vp_code_hash: extra_section_hash, - public_keys, - threshold, + public_keys: public_keys.clone(), + threshold: *threshold, }; let add_code_hash = |tx: &mut Tx, data: &mut UpdateAccount| { @@ -2064,12 +1849,10 @@ pub async fn build_update_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, add_code_hash, &signing_data.fee_payer, @@ -2079,28 +1862,20 @@ pub async fn build_update_account< } /// Submit a custom transaction -pub async fn build_custom< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_custom<'a>( + context: &mut impl Namada<'a>, args::TxCustom { tx: tx_args, code_path, data_path, serialized_tx, owner, - }: args::TxCustom, + }: &args::TxCustom, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(owner.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - &tx_args, + let signing_data = signing::aux_signing_data( + context, + tx_args, Some(owner.clone()), default_signer, ) @@ -2111,24 +1886,22 @@ pub async fn build_custom< Error::Other("Invalid tx deserialization.".to_string()) })? 
} else { - let tx_code_hash = query_wasm_code_hash_buf::<_, IO>( - client, - &code_path + let tx_code_hash = query_wasm_code_hash_buf( + context.client, + code_path.as_ref() .ok_or(Error::Other("No code path supplied".to_string()))?, ) .await?; let chain_id = tx_args.chain_id.clone().unwrap(); let mut tx = Tx::new(chain_id, tx_args.expiration); tx.add_code_from_hash(tx_code_hash); - data_path.map(|data| tx.add_serialized_data(data)); + data_path.clone().map(|data| tx.add_serialized_data(data)); tx }; - let epoch = prepare_tx::( - client, - wallet, - shielded, - &tx_args, + let epoch = prepare_tx( + context, + tx_args, &mut tx, signing_data.fee_payer.clone(), None, @@ -2167,7 +1940,6 @@ fn lift_rpc_error(res: std::result::Result) -> Result { /// if it isn't a validator async fn known_validator_or_err< C: crate::ledger::queries::Client + Sync, - IO: Io, >( validator: Address, force: bool, @@ -2178,7 +1950,7 @@ async fn known_validator_or_err< if !is_validator { if force { edisplay_line!( - IO, + StdIo, "The address {} doesn't belong to any known validator account.", validator ); @@ -2194,7 +1966,7 @@ async fn known_validator_or_err< /// general pattern for checking if an address exists on the chain, or /// throwing an error if it's not forced. Takes a generic error /// message and the error type. -async fn address_exists_or_err( +async fn address_exists_or_err( addr: Address, force: bool, client: &C, @@ -2208,7 +1980,7 @@ where let addr_exists = rpc::known_address::(client, &addr).await?; if !addr_exists { if force { - edisplay_line!(IO, "{}", message); + edisplay_line!(StdIo, "{}", message); Ok(addr) } else { Err(err(addr)) @@ -2223,7 +1995,6 @@ where /// if it isn't on chain async fn source_exists_or_err< C: crate::ledger::queries::Client + Sync, - IO: Io, >( token: Address, force: bool, @@ -2231,7 +2002,7 @@ async fn source_exists_or_err< ) -> Result
{ let message = format!("The source address {} doesn't exist on chain.", token); - address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { + address_exists_or_err(token, force, client, message, |err| { Error::from(TxError::SourceDoesNotExist(err)) }) .await @@ -2242,7 +2013,6 @@ async fn source_exists_or_err< /// if it isn't on chain async fn target_exists_or_err< C: crate::ledger::queries::Client + Sync, - IO: Io, >( token: Address, force: bool, @@ -2250,7 +2020,7 @@ async fn target_exists_or_err< ) -> Result
{ let message = format!("The target address {} doesn't exist on chain.", token); - address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { + address_exists_or_err(token, force, client, message, |err| { Error::from(TxError::TargetLocationDoesNotExist(err)) }) .await @@ -2261,7 +2031,6 @@ async fn target_exists_or_err< /// overrides this. Returns the updated balance for fee check if necessary async fn check_balance_too_low_err< C: crate::ledger::queries::Client + Sync, - IO: Io, >( token: &Address, source: &Address, @@ -2278,17 +2047,17 @@ async fn check_balance_too_low_err< None => { if force { edisplay_line!( - IO, + StdIo, "The balance of the source {} of token {} is lower \ than the amount to be transferred. Amount to \ transfer is {} and the balance is {}.", source, token, - format_denominated_amount::<_, IO>( + format_denominated_amount( client, token, amount ) .await, - format_denominated_amount::<_, IO>( + format_denominated_amount( client, token, balance ) .await, @@ -2309,7 +2078,7 @@ async fn check_balance_too_low_err< )) => { if force { edisplay_line!( - IO, + StdIo, "No balance found for the source {} of token {}", source, token @@ -2350,12 +2119,11 @@ fn validate_untrusted_code_err( } async fn query_wasm_code_hash_buf< C: crate::ledger::queries::Client + Sync, - IO: Io, >( client: &C, path: &Path, ) -> Result { - query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()).await + query_wasm_code_hash(client, path.to_string_lossy()).await } /// A helper for [`fn build`] that can be used for `on_tx` arg that does nothing diff --git a/shared/src/types/io.rs b/shared/src/types/io.rs index 462dbef95f..007d5acd93 100644 --- a/shared/src/types/io.rs +++ b/shared/src/types/io.rs @@ -3,10 +3,10 @@ //! functions. /// Rust native I/O handling. 
-pub struct DefaultIo; +pub struct StdIo; #[async_trait::async_trait(?Send)] -impl Io for DefaultIo {} +impl Io for StdIo {} #[async_trait::async_trait(?Send)] #[allow(missing_docs)] diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 2f5bbe4ea7..b763aec013 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -20,7 +20,7 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; use namada::types::address::Address; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use namada::types::storage::Epoch; use namada::types::token; use namada_apps::client::tx::CLIShieldedUtils; @@ -688,7 +688,7 @@ fn ledger_txs_and_queries() -> Result<()> { #[test] fn masp_txs_and_queries() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -836,7 +836,7 @@ fn masp_txs_and_queries() -> Result<()> { #[test] fn wrapper_disposable_signer() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. 
diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index ecd1b34465..1cd08b9976 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; @@ -29,7 +29,7 @@ fn masp_incentives() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -765,7 +765,7 @@ fn masp_pinned_txs() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); let mut node = setup::setup()?; // Wait till epoch boundary @@ -928,7 +928,7 @@ fn masp_txs_and_queries() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); enum Response { Ok(&'static str), @@ -1234,7 +1234,7 @@ fn wrapper_fee_unshielding() { // This address doesn't matter for tests. But an argument is required. 
let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. From d10ba96b8039d9ac480d0cd1798e599ac6c76914 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 25 Sep 2023 10:48:29 +0200 Subject: [PATCH 038/161] Allow the prototypical Tx builder to be modified in NamadaImpl instances. --- shared/src/ledger/mod.rs | 78 ++++++++++++++++++++++++++++++++++------ shared/src/sdk/args.rs | 10 +++++- shared/src/sdk/tx.rs | 5 +-- 3 files changed, 80 insertions(+), 13 deletions(-) diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 505699d707..d82e09bd8a 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -23,6 +23,7 @@ use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; use crate::types::masp::{TransferSource, TransferTarget}; use crate::types::address::Address; use crate::sdk::args::{self, InputAmount}; +use crate::sdk::args::SdkTypes; use crate::sdk::tx::{ TX_TRANSFER_WASM, TX_REVEAL_PK, TX_BOND_WASM, TX_UNBOND_WASM, TX_IBC_WASM, TX_INIT_PROPOSAL, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, VP_USER_WASM, @@ -68,6 +69,11 @@ pub trait Namada<'a> : DerefMut Address { + self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone() + } /// Make a tx builder using no arguments fn tx_builder(&mut self) -> args::Tx { @@ -83,7 +89,7 @@ pub trait Namada<'a> : DerefMut : DerefMut : DerefMut : DerefMut args::InitProposal { args::InitProposal { proposal_data, - native_token: self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone(), + native_token: self.native_token(), is_offline: false, is_pgf_stewards: false, is_pgf_funding: false, @@ -310,7 +316,7 @@ pub trait Namada<'a> : DerefMut : 
DerefMut(NamadaStruct<'a, C, U, V>) where +pub struct NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, U: WalletUtils, - V: ShieldedUtils; + V: ShieldedUtils, +{ + namada: NamadaStruct<'a, C, U, V>, + prototype: args::Tx, +} impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, @@ -392,7 +402,37 @@ impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where wallet: &'a mut Wallet, shielded: &'a mut ShieldedContext, ) -> Self { - Self(NamadaStruct { client, wallet, shielded }) + let fee_token = wallet + .find_address(args::NAM) + .expect("NAM not in wallet") + .clone(); + Self { + namada: NamadaStruct { client, wallet, shielded }, + prototype: args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + fee_amount: None, + wrapper_fee_payer: None, + fee_token, + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + }, + } } } @@ -404,7 +444,7 @@ impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> where type Target = NamadaStruct<'a, C, U, V>; fn deref(&self) -> &Self::Target { - &self.0 + &self.namada } } @@ -414,7 +454,7 @@ impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> where V: ShieldedUtils, { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 + &mut self.namada } } @@ -426,4 +466,22 @@ impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where type Client = C; type WalletUtils = U; type ShieldedUtils = V; + + /// Obtain the prototypical Tx builder + fn tx_builder(&mut self) -> args::Tx { + self.prototype.clone() + } +} + +/// Allow the prototypical Tx builder to be modified +impl<'a, C, U, V> args::TxBuilder for 
NamadaImpl<'a, C, U, V> where + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, +{ + fn tx(self, func: F) -> Self where + F: FnOnce(args::Tx) -> args::Tx, + { + Self { prototype: func(self.prototype), ..self } + } } diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 6e1381b18f..3c6dca8cfa 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -180,6 +180,14 @@ pub enum InputAmount { Unvalidated(token::DenominatedAmount), } +impl std::str::FromStr for InputAmount { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + token::DenominatedAmount::from_str(s).map(InputAmount::Unvalidated) + } +} + /// Transfer transaction arguments #[derive(Clone, Debug)] pub struct TxTransfer { @@ -234,7 +242,7 @@ impl TxTransfer { impl TxTransfer { /// Build a transaction from this builder - pub async fn build<'a>(self, context: &mut impl Namada<'a>) -> + pub async fn build<'a>(&mut self, context: &mut impl Namada<'a>) -> crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> { tx::build_transfer(context, self).await diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 2ca4879bfe..4178986caf 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -110,6 +110,7 @@ pub const TX_UPDATE_STEWARD_COMMISSION: &str = const DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: u64 = 60; /// Capture the result of running a transaction +#[derive(Debug)] pub enum ProcessTxResponse { /// Result of submitting a transaction to the blockchain Applied(TxResponse), @@ -1581,7 +1582,7 @@ async fn used_asset_types<'a, P, R, K, N>( /// Submit an ordinary transfer pub async fn build_transfer<'a, N: Namada<'a>>( context: &mut N, - mut args: args::TxTransfer, + args: &mut args::TxTransfer, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(args.source.effective_address()); let signing_data = signing::aux_signing_data( @@ -1642,7 +1643,7 @@ pub async fn build_transfer<'a, N: Namada<'a>>( }; // Construct 
the shielded part of the transaction, if any - let stx_result = ShieldedContext::::gen_shielded_transfer(context, &args).await; + let stx_result = ShieldedContext::::gen_shielded_transfer(context, args).await; let shielded_parts = match stx_result { Ok(stx) => Ok(stx), From b874ffaf17be6e61a4af43769ee1733ada3fbce1 Mon Sep 17 00:00:00 2001 From: yito88 Date: Mon, 2 Oct 2023 17:23:42 +0200 Subject: [PATCH 039/161] remove trace-path --- apps/src/lib/cli.rs | 10 --------- apps/src/lib/cli/context.rs | 16 +++++++++++++ apps/src/lib/client/rpc.rs | 7 +++--- benches/lib.rs | 1 - core/src/ledger/ibc/mod.rs | 4 ++-- core/src/types/ibc.rs | 7 +++--- shared/src/sdk/args.rs | 5 ----- shared/src/sdk/rpc.rs | 45 ++++++++++++++++++++++++++++++++++++- shared/src/sdk/signing.rs | 1 - shared/src/sdk/tx.rs | 39 +++++++++++++------------------- tests/src/e2e/ibc_tests.rs | 24 ++++---------------- 11 files changed, 89 insertions(+), 70 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 79db0b3824..135ff1e3c5 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2516,7 +2516,6 @@ pub mod args { use std::path::PathBuf; use std::str::FromStr; - use namada::ibc::applications::transfer::TracePath; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; pub use namada::sdk::args::*; use namada::types::address::Address; @@ -2726,7 +2725,6 @@ pub mod args { pub const TM_ADDRESS: Arg = arg("tm-address"); pub const TOKEN_OPT: ArgOpt = TOKEN.opt(); pub const TOKEN: Arg = arg("token"); - pub const TRACE_PATH: ArgOpt = arg_opt("trace-path"); pub const TRANSFER_SOURCE: Arg = arg("source"); pub const TRANSFER_TARGET: Arg = arg("target"); pub const TX_HASH: Arg = arg("tx-hash"); @@ -3506,7 +3504,6 @@ pub mod args { source: ctx.get_cached(&self.source), target: ctx.get(&self.target), token: ctx.get(&self.token), - trace_path: self.trace_path, amount: self.amount, native_token: ctx.native_token.clone(), tx_code_path: self.tx_code_path.to_path_buf(), @@ -3520,7 
+3517,6 @@ pub mod args { let source = TRANSFER_SOURCE.parse(matches); let target = TRANSFER_TARGET.parse(matches); let token = TOKEN.parse(matches); - let trace_path = TRACE_PATH.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_TRANSFER_WASM); Self { @@ -3528,7 +3524,6 @@ pub mod args { source, target, token, - trace_path, amount, native_token: (), tx_code_path, @@ -3546,7 +3541,6 @@ pub mod args { to produce the signature.", )) .arg(TOKEN.def().help("The transfer token.")) - .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) } } @@ -3558,7 +3552,6 @@ pub mod args { source: ctx.get(&self.source), receiver: self.receiver, token: ctx.get(&self.token), - trace_path: self.trace_path, amount: self.amount, port_id: self.port_id, channel_id: self.channel_id, @@ -3576,7 +3569,6 @@ pub mod args { let source = SOURCE.parse(matches); let receiver = RECEIVER.parse(matches); let token = TOKEN.parse(matches); - let trace_path = TRACE_PATH.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let port_id = PORT_ID.parse(matches); let channel_id = CHANNEL_ID.parse(matches); @@ -3589,7 +3581,6 @@ pub mod args { source, receiver, token, - trace_path, amount, port_id, channel_id, @@ -3610,7 +3601,6 @@ pub mod args { "The receiver address on the destination chain as string.", )) .arg(TOKEN.def().help("The transfer token.")) - .arg(TRACE_PATH.def().help("The transfer token's trace path.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) .arg(PORT_ID.def().help("The port ID.")) .arg(CHANNEL_ID.def().help("The channel ID.")) diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 4aac8b1026..520e999189 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -6,11 +6,13 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; +use 
namada::ledger::ibc::storage::ibc_token; use namada::sdk::masp::ShieldedContext; use namada::sdk::wallet::Wallet; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; +use namada::types::ibc::is_ibc_denom; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::*; @@ -367,6 +369,20 @@ impl ArgFromContext for Address { }) .unwrap_or(Err(Skip)) }) + // An IBC token + .or_else(|_| { + is_ibc_denom(raw) + .map(|(trace_path, base_denom)| { + let base_token = ctx + .wallet + .find_address(&base_denom) + .map(|addr| addr.to_string()) + .unwrap_or(base_denom); + let ibc_denom = format!("{trace_path}/{base_token}"); + ibc_token(ibc_denom) + }) + .ok_or(Skip) + }) // Or it can be an alias that may be found in the wallet .or_else(|_| ctx.wallet.find_address(raw).cloned().ok_or(Skip)) .map_err(|_| format!("Unknown address {raw}")) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d3363d17a7..cfd13b42e2 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -50,7 +50,7 @@ use namada::sdk::wallet::{AddressVpType, Wallet}; use namada::types::address::{masp, Address, InternalAddress}; use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; -use namada::types::ibc::split_ibc_denom; +use namada::types::ibc::is_ibc_denom; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; @@ -385,7 +385,7 @@ pub async fn query_transparent_balance< display_line!(IO, "{}: {}", token_alias, balance); } Err(e) => { - display_line!(IO, "Eror in querying: {e}"); + display_line!(IO, "Querying error: {e}"); display_line!( IO, "No {} balance found for {}", @@ -761,7 +761,7 @@ fn get_ibc_denom_alias( wallet: &Wallet, ibc_denom: impl AsRef, ) -> String { - split_ibc_denom(&ibc_denom) + is_ibc_denom(&ibc_denom) .map(|(trace_path, base_token)| { let 
base_token_alias = match Address::decode(&base_token) { Ok(base_token) => wallet.lookup_alias(&base_token), @@ -776,7 +776,6 @@ fn get_ibc_denom_alias( .unwrap_or(ibc_denom.as_ref().to_string()) } - /// Query Proposals pub async fn query_proposal< C: namada::ledger::queries::Client + Sync, diff --git a/benches/lib.rs b/benches/lib.rs index 30765d8beb..47645abdf4 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -783,7 +783,6 @@ impl BenchShieldedCtx { source: source.clone(), target: target.clone(), token: address::nam(), - trace_path: None, amount: InputAmount::Validated(DenominatedAmount { amount, denom: 0.into(), diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 4900312a7f..40382282cd 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -29,7 +29,7 @@ use crate::ibc_proto::google::protobuf::Any; use crate::types::address::Address; use crate::types::chain::ChainId; use crate::types::ibc::{ - split_ibc_denom, EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, + is_ibc_denom, EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, }; #[allow(missing_docs)] @@ -160,7 +160,7 @@ where e )) })?; - if let Some((_, base_token)) = split_ibc_denom(&ibc_denom) { + if let Some((_, base_token)) = is_ibc_denom(&ibc_denom) { self.ctx .borrow_mut() .store_ibc_denom(base_token, trace_hash, &ibc_denom) diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 4ee504fb4a..e7cb5f745f 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -94,10 +94,11 @@ mod ibc_rs_conversion { /// Returns the trace path and the token string if the denom is an IBC /// denom. 
- pub fn split_ibc_denom( - denom: impl AsRef, - ) -> Option<(TracePath, String)> { + pub fn is_ibc_denom(denom: impl AsRef) -> Option<(TracePath, String)> { let prefixed_denom = PrefixedDenom::from_str(denom.as_ref()).ok()?; + if prefixed_denom.trace_path.is_empty() { + return None; + } // The base token isn't decoded because it could be non Namada token Some(( prefixed_denom.trace_path, diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index af65abcad2..0b87317530 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -11,7 +11,6 @@ use namada_core::types::time::DateTimeUtc; use serde::{Deserialize, Serialize}; use zeroize::Zeroizing; -use crate::ibc::applications::transfer::TracePath; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::types::address::Address; use crate::types::keccak::KeccakHash; @@ -136,8 +135,6 @@ pub struct TxTransfer { pub target: C::TransferTarget, /// Transferred token address pub token: C::Address, - /// Transferred token's trace path - pub trace_path: Option, /// Transferred token amount pub amount: InputAmount, /// Native token address @@ -168,8 +165,6 @@ pub struct TxIbcTransfer { pub receiver: String, /// Transferred token address pub token: C::Address, - /// Transferred token's trace path - pub trace_path: Option, /// Transferred token amount pub amount: InputAmount, /// Port ID diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 58609bed42..d98e0b61ec 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -13,7 +13,7 @@ use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; use namada_core::ledger::storage::LastBlock; use namada_core::types::account::Account; -use namada_core::types::address::Address; +use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::storage::Key; use namada_core::types::token::{ Amount, DenominatedAmount, Denomination, MaspDenom, @@ -25,6 
+25,9 @@ use namada_proof_of_stake::types::{ use serde::Serialize; use crate::ledger::events::Event; +use crate::ledger::ibc::storage::{ + ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, +}; use crate::ledger::queries::vp::pos::EnrichedBondsAndUnbondsDetails; use crate::ledger::queries::RPC; use crate::proto::Tx; @@ -1088,3 +1091,43 @@ pub async fn format_denominated_amount< }); DenominatedAmount { amount, denom }.to_string() } + +/// Look up the IBC denomination from a IbcToken. +pub async fn query_ibc_denom< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( + client: &C, + token: &Address, + owner: Option<&Address>, +) -> String { + let hash = match token { + Address::Internal(InternalAddress::IbcToken(hash)) => hash, + _ => return token.to_string(), + }; + + if let Some(owner) = owner { + let ibc_denom_key = ibc_denom_key(owner.to_string(), hash); + if let Ok(ibc_denom) = + query_storage_value::(client, &ibc_denom_key).await + { + return ibc_denom; + } + } + + // No owner is specified or the owner doesn't have the token + let ibc_denom_prefix = ibc_denom_key_prefix(None); + if let Ok(Some(ibc_denoms)) = + query_storage_prefix::(client, &ibc_denom_prefix).await + { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, token_hash)) = is_ibc_denom_key(&key) { + if token_hash == *hash { + return ibc_denom; + } + } + } + } + + token.to_string() +} diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 9f1d541faf..042be03a63 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -426,7 +426,6 @@ pub async fn wrap_tx< fee_payer_address.clone(), ), token: args.fee_token.clone(), - trace_path: None, amount: args::InputAmount::Validated(DenominatedAmount { // NOTE: must unshield the total fee amount, not the // diff, because the ledger evaluates the transaction in diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 5b477de47e..e95ab58fe1 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -36,12 
+36,11 @@ use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::packet::PacketData; -use crate::ibc::applications::transfer::{PrefixedCoin, PrefixedDenom}; +use crate::ibc::applications::transfer::PrefixedCoin; use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; -use crate::ledger::ibc::storage::ibc_token; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; @@ -1414,16 +1413,10 @@ pub async fn build_ibc_transfer< .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; - let ibc_denom = PrefixedDenom { - trace_path: args.trace_path.unwrap_or_default(), - base_denom: args - .token - .to_string() - .parse() - .expect("Conversion from the token shouldn't fail"), - }; + let ibc_denom = + rpc::query_ibc_denom::<_, IO>(client, &args.token, Some(&source)).await; let token = PrefixedCoin { - denom: ibc_denom, + denom: ibc_denom.parse().expect("Invalid IBC denom"), // Set the IBC amount as an integer amount: validated_amount.into(), }; @@ -1666,11 +1659,6 @@ pub async fn build_transfer< ) -> Result<(Tx, Option)> { let source = args.source.effective_address(); let target = args.target.effective_address(); - let token = if let Some(trace_path) = &args.trace_path { - ibc_token(format!("{}/{}", trace_path.clone(), args.token)) - } else { - args.token.clone() - }; // Check that the source address exists on chain source_exists_or_err::<_, IO>(source.clone(), args.tx.force, client) @@ -1679,16 +1667,20 @@ pub async fn build_transfer< target_exists_or_err::<_, IO>(target.clone(), args.tx.force, client) .await?; // Check source balance - let balance_key = token::balance_key(&token, &source); + let balance_key = 
token::balance_key(&args.token, &source); // validate the amount given - let validated_amount = - validate_amount::<_, IO>(client, args.amount, &token, args.tx.force) - .await?; + let validated_amount = validate_amount::<_, IO>( + client, + args.amount, + &args.token, + args.tx.force, + ) + .await?; args.amount = InputAmount::Validated(validated_amount); let post_balance = check_balance_too_low_err::( - &token, + &args.token, &source, validated_amount.amount, balance_key, @@ -1699,7 +1691,7 @@ pub async fn build_transfer< let tx_source_balance = Some(TxSourcePostBalance { post_balance, source: source.clone(), - token: token.clone(), + token: args.token.clone(), }); let masp_addr = masp(); @@ -1712,7 +1704,7 @@ pub async fn build_transfer< // TODO Refactor me, we shouldn't rely on any specific token here. (token::Amount::default(), args.native_token.clone()) } else { - (validated_amount.amount, token) + (validated_amount.amount, args.token.clone()) }; // Determine whether to pin this transaction to a storage key let key = match &args.target { @@ -2215,6 +2207,7 @@ fn validate_untrusted_code_err( Ok(()) } } + async fn query_wasm_code_hash_buf< C: crate::ledger::queries::Client + Sync, IO: Io, diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 27261a233d..3cd2ba48a3 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -637,7 +637,6 @@ fn transfer_token( ALBERT, &receiver, NAM, - None, "100000", ALBERT_KEY, port_id_a, @@ -707,7 +706,6 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, - None, "10.1", ALBERT_KEY, port_id_a, @@ -723,7 +721,6 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, - None, "10", ALBERT_KEY, &"port".parse().unwrap(), @@ -739,7 +736,6 @@ fn try_invalid_transfers( ALBERT, &receiver, NAM, - None, "10", ALBERT_KEY, port_id_a, @@ -757,10 +753,8 @@ fn transfer_received_token( channel_id: &ChannelId, test: &Test, ) -> Result<()> { - // token received via the port and channel - let trace_path = 
format!("{port_id}/{channel_id}"); - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let ibc_denom = format!("{port_id}/{channel_id}/nam"); let amount = Amount::native_whole(50000).to_string_native(); let tx_args = [ "transfer", @@ -769,9 +763,7 @@ fn transfer_received_token( "--target", ALBERT, "--token", - NAM, - "--trace-path", - &trace_path, + &ibc_denom, "--amount", &amount, "--gas-token", @@ -798,14 +790,13 @@ fn transfer_back( let receiver = find_address(test_a, ALBERT)?; // Chain A was the source for the sent token - let trace_path = format!("{}/{}", port_id_b, channel_id_b); + let ibc_denom = format!("{port_id_b}/{channel_id_b}/nam"); // Send a token from Chain B let height = transfer( test_b, BERTHA, &receiver, - NAM, - Some(&trace_path), + ibc_denom, "50000", BERTHA_KEY, port_id_b, @@ -869,7 +860,6 @@ fn transfer_timeout( ALBERT, &receiver, NAM, - None, "100000", ALBERT_KEY, port_id_a, @@ -999,7 +989,6 @@ fn transfer( sender: impl AsRef, receiver: &Address, token: impl AsRef, - trace_path: Option<&str>, amount: impl AsRef, signer: impl AsRef, port_id: &PortId, @@ -1033,11 +1022,6 @@ fn transfer( &rpc, ]; - if let Some(trace_path) = trace_path { - tx_args.push("--trace-path"); - tx_args.push(trace_path.clone()); - } - let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); From 4cf16cb4b6d5a5b6d27573333f4f8f45eb60e71d Mon Sep 17 00:00:00 2001 From: yito88 Date: Wed, 20 Sep 2023 00:23:53 +0200 Subject: [PATCH 040/161] WIP: receive to a shielded address --- core/src/ledger/ibc/context/storage.rs | 8 +- core/src/ledger/ibc/mod.rs | 43 +++- core/src/ledger/vp_env.rs | 50 +++- core/src/types/address.rs | 10 +- core/src/types/ibc.rs | 69 ++++- shared/src/ledger/native_vp/ibc/context.rs | 69 ++++- shared/src/ledger/native_vp/mod.rs | 13 + shared/src/ledger/vp_host_fns.rs | 15 ++ shared/src/vm/host_env.rs | 116 ++++++++- shared/src/vm/wasm/host_env.rs | 1 + tx_prelude/src/ibc.rs | 
13 +- tx_prelude/src/token.rs | 78 +++--- vm_env/src/lib.rs | 6 + vp_prelude/src/lib.rs | 14 + wasm/wasm_source/src/tx_bridge_pool.rs | 9 - wasm/wasm_source/src/tx_transfer.rs | 34 ++- wasm/wasm_source/src/vp_implicit.rs | 12 - wasm/wasm_source/src/vp_masp.rs | 284 ++++++++++----------- wasm/wasm_source/src/vp_testnet_faucet.rs | 7 +- wasm/wasm_source/src/vp_user.rs | 12 - wasm/wasm_source/src/vp_validator.rs | 12 - 21 files changed, 591 insertions(+), 284 deletions(-) diff --git a/core/src/ledger/ibc/context/storage.rs b/core/src/ledger/ibc/context/storage.rs index 2d1c8afdb3..ef8b61116d 100644 --- a/core/src/ledger/ibc/context/storage.rs +++ b/core/src/ledger/ibc/context/storage.rs @@ -7,7 +7,7 @@ pub use ics23::ProofSpec; use super::super::Error; use crate::ledger::storage_api; use crate::types::address::Address; -use crate::types::ibc::IbcEvent; +use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use crate::types::storage::{BlockHeight, Header, Key}; use crate::types::token::DenominatedAmount; @@ -70,6 +70,12 @@ pub trait IbcStorageContext { amount: DenominatedAmount, ) -> Result<(), Self::Error>; + /// Handle masp tx + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error>; + /// Mint token fn mint_token( &mut self, diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 40382282cd..05dec84b99 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -29,7 +29,8 @@ use crate::ibc_proto::google::protobuf::Any; use crate::types::address::Address; use crate::types::chain::ChainId; use crate::types::ibc::{ - is_ibc_denom, EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, + get_shielded_transfer, is_ibc_denom, EVENT_TYPE_DENOM_TRACE, + EVENT_TYPE_PACKET, }; #[allow(missing_docs)] @@ -53,6 +54,8 @@ pub enum Error { Denom(String), #[error("Invalid chain ID: {0}")] ChainId(ChainId), + #[error("Handling MASP transaction error: {0}")] + MaspTx(String), } /// IBC actions to handle IBC operations 
@@ -128,15 +131,17 @@ where let envelope = MsgEnvelope::try_from(any_msg).map_err(Error::Execution)?; execute(self, envelope.clone()).map_err(Error::Execution)?; + // For receiving the token to a shielded address + self.handle_masp_tx(&envelope)?; // the current ibc-rs execution doesn't store the denom for the // token hash when transfer with MsgRecvPacket - self.store_denom(envelope) + self.store_denom(&envelope) } } } /// Store the denom when transfer with MsgRecvPacket - fn store_denom(&mut self, envelope: MsgEnvelope) -> Result<(), Error> { + fn store_denom(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { match envelope { MsgEnvelope::Packet(PacketMsg::Recv(_)) => { if let Some((trace_hash, ibc_denom, receiver)) = @@ -248,6 +253,38 @@ where } } } + + /// Handle the MASP transaction if needed + fn handle_masp_tx(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { + let shielded_transfer = match envelope { + MsgEnvelope::Packet(PacketMsg::Recv(_)) => { + let event = self + .ctx + .borrow() + .get_ibc_event("fungible_token_packet") + .map_err(|_| { + Error::MaspTx( + "Reading the IBC event failed".to_string(), + ) + })?; + match event { + Some(event) => get_shielded_transfer(&event) + .map_err(|e| Error::MaspTx(e.to_string()))?, + None => return Ok(()), + } + } + _ => return Ok(()), + }; + if let Some(shielded_transfer) = shielded_transfer { + self.ctx + .borrow_mut() + .handle_masp_tx(&shielded_transfer) + .map_err(|_| { + Error::MaspTx("Writing MASP components failed".to_string()) + })?; + } + Ok(()) + } } #[derive(Debug, Default)] diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index 969452b241..c62cf2982d 100644 --- a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -2,14 +2,17 @@ //! inside validity predicates. 
use borsh::BorshDeserialize; +use masp_primitives::transaction::Transaction; -use super::storage_api::{self, StorageRead}; +use super::storage_api::{self, OptionExt, ResultExt, StorageRead}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; +use crate::types::ibc::{get_shielded_transfer, IbcEvent}; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; +use crate::types::token::Transfer; /// Validity predicate's environment is available for native VPs and WASM VPs pub trait VpEnv<'view> @@ -75,6 +78,12 @@ where /// Get the address of the native token. fn get_native_token(&self) -> Result; + /// Get the IBC event. + fn get_ibc_event( + &self, + event_type: String, + ) -> Result, storage_api::Error>; + /// Storage prefix iterator, ordered by storage keys. It will try to get an /// iterator from the storage. fn iter_prefix<'iter>( @@ -97,6 +106,45 @@ where /// Get a tx hash fn get_tx_code_hash(&self) -> Result, storage_api::Error>; + /// Get the shielded action including the transfer and the masp tx + fn get_shielded_action( + &self, + tx_data: Tx, + ) -> Result<(Transfer, Transaction), storage_api::Error> { + let signed = tx_data; + match Transfer::try_from_slice(&signed.data().unwrap()[..]) { + Ok(transfer) => { + let shielded_hash = transfer + .shielded + .ok_or_err_msg("unable to find shielded hash")?; + let masp_tx = signed + .get_section(&shielded_hash) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg("unable to find shielded section")?; + Ok((transfer, masp_tx)) + } + Err(_) => { + if let Some(event) = + self.get_ibc_event("fungible_token_packet".to_string())? + { + if let Some(shielded) = + get_shielded_transfer(&event).into_storage_result()? 
+ { + Ok((shielded.transfer, shielded.masp_tx)) + } else { + Err(storage_api::Error::new_const( + "No shielded transfer in the IBC event", + )) + } + } else { + Err(storage_api::Error::new_const( + "No IBC event for the shielded action", + )) + } + } + } + } + /// Verify a MASP transaction fn verify_masp(&self, tx: Vec) -> Result; diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 416b3f059e..1a18118e83 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -445,12 +445,18 @@ impl FromStr for Address { } } -/// for IBC signer +/// Convert the IBC signer to an address for the IBC transfer impl TryFrom for Address { type Error = DecodeError; fn try_from(signer: Signer) -> Result { - Address::decode(signer.as_ref()) + // When IBC transfer, the address in this signer should be an address or + // a payment address. If it's a payment address, this returns + // the masp address. + Address::decode(signer.as_ref()).or( + crate::types::masp::PaymentAddress::from_str(signer.as_ref()) + .and(Ok(masp())), + ) } } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index e7cb5f745f..3e7ae3c4fc 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -1,4 +1,4 @@ -//! IBC event without IBC-related data types +//! 
IBC-related data types use std::cmp::Ordering; use std::collections::HashMap; @@ -50,25 +50,41 @@ impl std::fmt::Display for IbcEvent { } } +/// IBC shielded transfer +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct IbcShieldedTransfer { + /// The IBC event type + pub transfer: crate::types::token::Transfer, + /// The attributes of the IBC event + pub masp_tx: masp_primitives::transaction::Transaction, +} + #[cfg(any(feature = "abciplus", feature = "abcipp"))] mod ibc_rs_conversion { use std::collections::HashMap; use std::str::FromStr; + use borsh::{BorshDeserialize, BorshSerialize}; + use data_encoding::HEXLOWER; use thiserror::Error; - use super::IbcEvent; - use crate::ibc::applications::transfer::{PrefixedDenom, TracePath}; + use super::{IbcEvent, IbcShieldedTransfer}; + use crate::ibc::applications::transfer::{Memo, PrefixedDenom, TracePath}; use crate::ibc::core::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; use crate::tendermint_proto::abci::Event as AbciEvent; + use crate::types::masp::PaymentAddress; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { #[error("IBC event error: {0}")] IbcEvent(IbcEventError), + #[error("IBC transfer memo HEX decoding error: {0}")] + DecodingHex(data_encoding::DecodeError), + #[error("IBC transfer memo decoding error: {0}")] + DecodingShieldedTransfer(std::io::Error), } /// Conversion functions result @@ -105,6 +121,53 @@ mod ibc_rs_conversion { prefixed_denom.base_denom.to_string(), )) } + + impl From for Memo { + fn from(shielded: IbcShieldedTransfer) -> Self { + let bytes = + shielded.try_to_vec().expect("Encoding shouldn't failed"); + HEXLOWER.encode(&bytes).into() + } + } + + impl TryFrom for IbcShieldedTransfer { + type Error = Error; + + fn try_from(memo: Memo) -> Result { + let bytes = HEXLOWER + .decode(memo.as_ref().as_bytes()) + .map_err(Error::DecodingHex)?; + Self::try_from_slice(&bytes) + .map_err(Error::DecodingShieldedTransfer) + } + } + + /// Get the shielded 
transfer from the memo + pub fn get_shielded_transfer( + event: &IbcEvent, + ) -> Result> { + if event.event_type != "fungible_token_packet" { + // This event is not for receiving a token + return Ok(None); + } + let is_success = + event.attributes.get("success") == Some(&"true".to_string()); + let receiver = event.attributes.get("receiver"); + let is_shielded = if let Some(receiver) = receiver { + PaymentAddress::from_str(&receiver).is_ok() + } else { + false + }; + if !is_success || !is_shielded { + return Ok(None); + } + + event + .attributes + .get("memo") + .map(|memo| IbcShieldedTransfer::try_from(Memo::from(memo.clone()))) + .transpose() + } } #[cfg(any(feature = "abciplus", feature = "abcipp"))] diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index 5af2afebf7..93784b8054 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -3,15 +3,21 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh::BorshSerialize; +use masp_primitives::transaction::Transaction; use namada_core::ledger::ibc::storage::is_ibc_key; use namada_core::ledger::ibc::{IbcCommonContext, IbcStorageContext}; use namada_core::ledger::storage::write_log::StorageModification; use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::ledger::storage_api::StorageRead; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage::{BlockHeight, Header, Key}; -use namada_core::types::token::{self, Amount, DenominatedAmount}; +use namada_core::types::address::{self, Address, InternalAddress}; +use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; +use namada_core::types::storage::{ + BlockHeight, Epoch, Header, Key, KeySeg, TxIndex, +}; +use namada_core::types::token::{ + self, Amount, DenominatedAmount, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, + TX_KEY_PREFIX, +}; use 
super::Error; use crate::ledger::native_vp::CtxPreStorageRead; @@ -160,6 +166,54 @@ where ) } + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + let masp_addr = address::masp(); + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = + self.ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + self.ctx.get_block_epoch().map_err(Error::NativeVpError)?, + self.ctx.get_block_height().map_err(Error::NativeVpError)?, + self.ctx.get_tx_index().map_err(Error::NativeVpError)?, + shielded.transfer.clone(), + shielded.masp_tx.clone(), + ); + self.write( + ¤t_tx_key, + record.try_to_vec().expect("encoding shouldn't failed"), + )?; + self.write( + &head_tx_key, + (current_tx_idx + 1) + .try_to_vec() + .expect("encoding shouldn't failed"), + )?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &shielded.transfer.key { + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + .expect("Cannot obtain a storage key"); + self.write( + &pin_key, + current_tx_idx + .try_to_vec() + .expect("encoding shouldn't fail"), + )?; + } + Ok(()) + } + fn mint_token( &mut self, target: &Address, @@ -344,6 +398,13 @@ where unimplemented!("Validation doesn't transfer") } + fn handle_masp_tx( + &mut self, + _shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't handle a masp tx") + } + fn mint_token( &mut self, _target: &Address, diff --git 
a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 31148a1568..002c256eb1 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -23,6 +23,7 @@ use crate::ledger::storage::{Storage, StorageHasher}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; +use crate::types::ibc::IbcEvent; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; @@ -449,6 +450,18 @@ where .into_storage_result() } + fn get_ibc_event( + &self, + event_type: String, + ) -> Result, storage_api::Error> { + vp_host_fns::get_ibc_event( + &mut self.gas_meter.borrow_mut(), + self.write_log, + event_type, + ) + .into_storage_result() + } + fn iter_prefix<'iter>( &'iter self, prefix: &Key, diff --git a/shared/src/ledger/vp_host_fns.rs b/shared/src/ledger/vp_host_fns.rs index a9aaa7eb16..c8e1bd12d7 100644 --- a/shared/src/ledger/vp_host_fns.rs +++ b/shared/src/ledger/vp_host_fns.rs @@ -15,6 +15,7 @@ use crate::ledger::gas::{GasMetering, VpGasMeter}; use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{self, write_log, Storage, StorageHasher}; use crate::proto::{Section, Tx}; +use crate::types::ibc::IbcEvent; /// These runtime errors will abort VP execution immediately #[allow(missing_docs)] @@ -333,6 +334,20 @@ where Ok(storage.native_token.clone()) } +/// Getting the IBC event. +pub fn get_ibc_event( + _gas_meter: &mut VpGasMeter, + write_log: &WriteLog, + event_type: String, +) -> EnvResult> { + for event in write_log.get_ibc_events() { + if event.event_type == event_type { + return Ok(Some(event.clone())); + } + } + Ok(None) +} + /// Storage prefix iterator for prior state (before tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. 
pub fn iter_prefix_pre<'a, DB, H>( diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..fd4129501d 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -5,6 +5,7 @@ use std::convert::TryInto; use std::num::TryFromIntError; use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::{GasMetering, TxGasMeter}; use namada_core::types::internal::KeyVal; use thiserror::Error; @@ -21,11 +22,12 @@ use crate::ledger::vp_host_fns; use crate::proto::Tx; use crate::types::address::{self, Address}; use crate::types::hash::Hash; -use crate::types::ibc::IbcEvent; +use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use crate::types::internal::HostEnvResult; -use crate::types::storage::{BlockHeight, Key, TxIndex}; +use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, + Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; use crate::vm::memory::VmMemory; use crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; @@ -1778,6 +1780,44 @@ where Ok(epoch.0) } +/// Getting the IBC event function exposed to the wasm VM VP environment. +pub fn vp_get_ibc_event( + env: &VpVmEnv, + event_type_ptr: u64, + event_type_len: u64, +) -> vp_host_fns::EnvResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + EVAL: VpEvaluator, + CA: WasmCacheAccess, +{ + let (event_type, gas) = env + .memory + .read_string(event_type_ptr, event_type_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + let gas_meter = unsafe { env.ctx.gas_meter.get() }; + vp_host_fns::add_gas(gas_meter, gas)?; + + let write_log = unsafe { env.ctx.write_log.get() }; + match vp_host_fns::get_ibc_event(gas_meter, write_log, event_type)? 
{ + Some(event) => { + let value = event + .try_to_vec() + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let len: i64 = value + .len() + .try_into() + .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) + } + None => Ok(HostEnvResult::Fail.to_i64()), + } +} + /// Verify a transaction signature /// TODO: this is just a warkaround to track gas for multiple singature /// verifications. When the runtime gas meter is implemented, this funcion can @@ -1868,8 +1908,6 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - use masp_primitives::transaction::Transaction; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; let (tx_bytes, gas) = env .memory @@ -2247,6 +2285,42 @@ where Ok(()) } + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + let masp_addr = address::masp(); + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = ibc_read_borsh(self, &head_tx_key) + .unwrap_or(None) + .unwrap_or(0); + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + ibc_get_block_epoch(self)?, + self.get_height()?, + ibc_get_tx_index(self)?, + shielded.transfer.clone(), + shielded.masp_tx.clone(), + ); + ibc_write_borsh(self, ¤t_tx_key, &record)?; + ibc_write_borsh(self, &head_tx_key, &(current_tx_idx + 1))?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &shielded.transfer.key { + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + 
.expect("Cannot obtain a storage key"); + ibc_write_borsh(self, &pin_key, ¤t_tx_idx)?; + } + Ok(()) + } + fn mint_token( &mut self, target: &Address, @@ -2396,6 +2470,40 @@ where Ok(()) } +/// Get the current epoch. +// Temp helper for ibc tx workaround. +fn ibc_get_block_epoch<'a, DB, H, CA>( + ctx: &TxCtx<'a, DB, H, CA>, +) -> TxResult +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let storage = unsafe { ctx.storage.get() }; + let (epoch, gas) = storage.get_current_epoch(); + ibc_tx_charge_gas(ctx, gas)?; + Ok(epoch) +} + +/// Get the tx index. +// Temp helper for ibc tx workaround. +fn ibc_get_tx_index<'a, DB, H, CA>( + ctx: &TxCtx<'a, DB, H, CA>, +) -> TxResult +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let tx_index = unsafe { ctx.tx_index.get() }; + ibc_tx_charge_gas( + ctx, + crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, + )?; + Ok(TxIndex(tx_index.0)) +} + // Temp. 
workaround for impl<'a, DB, H, CA> namada_core::ledger::ibc::IbcCommonContext for TxCtx<'a, DB, H, CA> diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index 57a8bc6986..e5ebce6ee2 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -127,6 +127,7 @@ where "namada_vp_get_block_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_hash), "namada_vp_get_tx_code_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_code_hash), "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), + "namada_vp_get_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_event), "namada_vp_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_tx_section_signature), "namada_vp_verify_masp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_masp), "namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs index f7ee8230b1..5e8e9522fa 100644 --- a/tx_prelude/src/ibc.rs +++ b/tx_prelude/src/ibc.rs @@ -10,11 +10,11 @@ pub use namada_core::ledger::ibc::{ use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::ledger::tx_env::TxEnv; use namada_core::types::address::{Address, InternalAddress}; -pub use namada_core::types::ibc::IbcEvent; +pub use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; use namada_core::types::storage::{BlockHeight, Header, Key}; use namada_core::types::token::DenominatedAmount; -use crate::token::{burn, mint, transfer}; +use crate::token::{burn, handle_masp_tx, mint, transfer}; use crate::{Ctx, KeyValIterator}; /// IBC actions to handle an IBC message @@ -89,7 +89,14 @@ impl IbcStorageContext for Ctx { token: &Address, amount: DenominatedAmount, ) 
-> std::result::Result<(), Self::Error> { - transfer(self, src, dest, token, amount, &None, &None, &None) + transfer(self, src, dest, token, amount) + } + + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + handle_masp_tx(self, &shielded.transfer, &shielded.masp_tx) } fn mint_token( diff --git a/tx_prelude/src/token.rs b/tx_prelude/src/token.rs index e950668d2b..6067c82a46 100644 --- a/tx_prelude/src/token.rs +++ b/tx_prelude/src/token.rs @@ -1,6 +1,5 @@ use masp_primitives::transaction::Transaction; use namada_core::types::address::Address; -use namada_core::types::hash::Hash; use namada_core::types::storage::KeySeg; use namada_core::types::token; pub use namada_core::types::token::*; @@ -15,9 +14,6 @@ pub fn transfer( dest: &Address, token: &Address, amount: DenominatedAmount, - key: &Option, - shielded_hash: &Option, - shielded: &Option, ) -> TxResult { if amount.amount != Amount::default() && src != dest { let src_key = token::balance_key(token, src); @@ -33,47 +29,43 @@ pub fn transfer( ctx.write(&src_key, src_bal)?; ctx.write(&dest_key, dest_bal)?; } + Ok(()) +} - // If this transaction has a shielded component, then handle it - // separately - if let Some(shielded) = shielded { - let masp_addr = address::masp(); - ctx.insert_verifier(&masp_addr)?; - let head_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - let current_tx_idx: u64 = - ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); - let current_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) +/// Handle a MASP transaction. 
+pub fn handle_masp_tx( + ctx: &mut Ctx, + transfer: &Transfer, + shielded: &Transaction, +) -> TxResult { + let masp_addr = address::masp(); + ctx.insert_verifier(&masp_addr)?; + let head_tx_key = storage::Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = + ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); + let current_tx_key = storage::Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + ctx.get_block_epoch()?, + ctx.get_block_height()?, + ctx.get_tx_index()?, + transfer.clone(), + shielded.clone(), + ); + ctx.write(¤t_tx_key, record)?; + ctx.write(&head_tx_key, current_tx_idx + 1)?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &transfer.key { + let pin_key = storage::Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) .expect("Cannot obtain a storage key"); - // Save the Transfer object and its location within the blockchain - // so that clients do not have to separately look these - // up - let transfer = Transfer { - source: src.clone(), - target: dest.clone(), - token: token.clone(), - amount, - key: key.clone(), - shielded: *shielded_hash, - }; - let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( - ctx.get_block_epoch()?, - ctx.get_block_height()?, - ctx.get_tx_index()?, - transfer, - shielded.clone(), - ); - ctx.write(¤t_tx_key, record)?; - ctx.write(&head_tx_key, current_tx_idx + 1)?; - // If storage key has been supplied, then pin this transaction to it - if let Some(key) = key { - let pin_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + key)) - 
.expect("Cannot obtain a storage key"); - ctx.write(&pin_key, current_tx_idx)?; - } + ctx.write(&pin_key, current_tx_idx)?; } Ok(()) } diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index 6a630f9349..edf00f847c 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -197,6 +197,12 @@ pub mod vp { // Get the native token address pub fn namada_vp_get_native_token(result_ptr: u64); + // Get the IBC event + pub fn namada_vp_get_ibc_event( + event_type_ptr: u64, + event_type_len: u64, + ) -> i64; + // Requires a node running with "Info" log level pub fn namada_vp_log_string(str_ptr: u64, str_len: u64); diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 0962628363..9857574acd 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -297,6 +297,20 @@ impl<'view> VpEnv<'view> for Ctx { get_native_token() } + fn get_ibc_event( + &self, + event_type: String, + ) -> Result, Error> { + let read_result = unsafe { + namada_vp_get_ibc_event( + event_type.as_ptr() as _, + event_type.len() as _, + ) + }; + Ok(read_from_buffer(read_result, namada_vp_result_buffer) + .and_then(|t| ibc::IbcEvent::try_from_slice(&t[..]).ok())) + } + fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index 88d757998f..d93b983943 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -23,9 +23,6 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { &bridge_pool::BRIDGE_POOL_ADDRESS, fee_token_addr, amount.native_denominated(), - &None, - &None, - &None, )?; log_string("Token transfer succeeded."); let TransferToEthereum { @@ -43,9 +40,6 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { ð_bridge::ADDRESS, &nam_addr, amount.native_denominated(), - &None, - &None, - &None, )?; } else { // Otherwise we escrow ERC20 tokens. 
@@ -56,9 +50,6 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { &bridge_pool::BRIDGE_POOL_ADDRESS, &token, amount.native_denominated(), - &None, - &None, - &None, )?; } log_string("Escrow succeeded"); diff --git a/wasm/wasm_source/src/tx_transfer.rs b/wasm/wasm_source/src/tx_transfer.rs index bdc683c339..f36f52c74d 100644 --- a/wasm/wasm_source/src/tx_transfer.rs +++ b/wasm/wasm_source/src/tx_transfer.rs @@ -11,15 +11,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let transfer = token::Transfer::try_from_slice(&data[..]) .wrap_err("failed to decode token::Transfer")?; debug_log!("apply_tx called with transfer: {:#?}", transfer); - let token::Transfer { - source, - target, - token, - amount, - key, - shielded: shielded_hash, - } = transfer; - let shielded = shielded_hash + + token::transfer( + ctx, + &transfer.source, + &transfer.target, + &transfer.token, + transfer.amount, + )?; + + let shielded = transfer + .shielded .as_ref() .map(|hash| { signed @@ -28,14 +30,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { .ok_or_err_msg("unable to find shielded section") }) .transpose()?; - token::transfer( - ctx, - &source, - &target, - &token, - amount, - &key, - &shielded_hash, - &shielded, - ) + if let Some(shielded) = shielded { + token::handle_masp_tx(ctx, &transfer, &shielded)?; + } + Ok(()) } diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 215dccf421..68db2d8408 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -362,9 +362,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -595,9 +592,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -658,9 +652,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -734,9 +725,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); diff --git a/wasm/wasm_source/src/vp_masp.rs 
b/wasm/wasm_source/src/vp_masp.rs index cb66211118..794ddec2ee 100644 --- a/wasm/wasm_source/src/vp_masp.rs +++ b/wasm/wasm_source/src/vp_masp.rs @@ -90,185 +90,167 @@ fn validate_tx( verifiers, ); - let signed = tx_data; - let transfer = - token::Transfer::try_from_slice(&signed.data().unwrap()[..]).unwrap(); + let (transfer, shielded_tx) = ctx.get_shielded_action(tx_data)?; + let mut transparent_tx_pool = I128Sum::zero(); + // The Sapling value balance adds to the transparent tx pool + transparent_tx_pool += shielded_tx.sapling_value_balance(); - let shielded = transfer - .shielded - .as_ref() - .map(|hash| { - signed - .get_section(hash) - .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg("unable to find shielded section") - }) - .transpose()?; - if let Some(shielded_tx) = shielded { - let mut transparent_tx_pool = I128Sum::zero(); - // The Sapling value balance adds to the transparent tx pool - transparent_tx_pool += shielded_tx.sapling_value_balance(); + if transfer.source != masp() { + // Handle transparent input + // Note that the asset type is timestamped so shields + // where the shielded value has an incorrect timestamp + // are automatically rejected + for denom in token::MaspDenom::iter() { + let (_transp_asset, transp_amt) = convert_amount( + ctx.get_block_epoch().unwrap(), + &transfer.token, + transfer.amount.into(), + denom, + ); - if transfer.source != masp() { - // Handle transparent input - // Note that the asset type is timestamped so shields - // where the shielded value has an incorrect timestamp - // are automatically rejected - for denom in token::MaspDenom::iter() { - let (_transp_asset, transp_amt) = convert_amount( - ctx.get_block_epoch().unwrap(), - &transfer.token, - transfer.amount.into(), - denom, - ); - - // Non-masp sources add to transparent tx pool - transparent_tx_pool += transp_amt; - } - } else { - // Handle shielded input - // The following boundary conditions must be satisfied - // 1. Zero transparent input - // 2. 
the transparent transaction value pool's amount must equal the - // containing wrapper transaction's fee amount - // Satisfies 1. - if let Some(transp_bundle) = shielded_tx.transparent_bundle() { - if !transp_bundle.vin.is_empty() { - debug_log!( - "Transparent input to a transaction from the masp \ - must be 0 but is {}", - transp_bundle.vin.len() - ); - return reject(); - } - } + // Non-masp sources add to transparent tx pool + transparent_tx_pool += transp_amt; } - - if transfer.target != masp() { - // Handle transparent output - // The following boundary conditions must be satisfied - // 1. One to 4 transparent outputs - // 2. Asset type must be properly derived - // 3. Value from the output must be the same as the containing - // transfer - // 4. Public key must be the hash of the target - - // Satisfies 1. - let transp_bundle = - shielded_tx.transparent_bundle().ok_or_err_msg( - "Expected transparent outputs in unshielding transaction", - )?; - - let out_length = transp_bundle.vout.len(); - if !(1..=4).contains(&out_length) { + } else { + // Handle shielded input + // The following boundary conditions must be satisfied + // 1. Zero transparent input + // 2. the transparent transaction value pool's amount must equal the + // containing wrapper transaction's fee amount + // Satisfies 1. 
+ if let Some(transp_bundle) = shielded_tx.transparent_bundle() { + if !transp_bundle.vin.is_empty() { debug_log!( - "Transparent output to a transaction to the masp must be \ - beteween 1 and 4 but is {}", - transp_bundle.vout.len() + "Transparent input to a transaction from the masp must be \ + 0 but is {}", + transp_bundle.vin.len() ); - return reject(); } - let mut outs = transp_bundle.vout.iter(); - let mut valid_count = 0; - for denom in token::MaspDenom::iter() { - let out = match outs.next() { - Some(out) => out, - None => continue, - }; + } + } + + if transfer.target != masp() { + // Handle transparent output + // The following boundary conditions must be satisfied + // 1. One to 4 transparent outputs + // 2. Asset type must be properly derived + // 3. Value from the output must be the same as the containing + // transfer + // 4. Public key must be the hash of the target - let expected_asset_type: AssetType = - asset_type_from_epoched_address( - ctx.get_block_epoch().unwrap(), - &transfer.token, - denom, - ); + // Satisfies 1. + let transp_bundle = shielded_tx.transparent_bundle().ok_or_err_msg( + "Expected transparent outputs in unshielding transaction", + )?; - // Satisfies 2. and 3. - if !valid_asset_type(&expected_asset_type, &out.asset_type) { - // we don't know which masp denoms are necessary apriori. - // This is encoded via the asset types. 
- continue; - } - if !valid_transfer_amount( - out.value, - denom.denominate(&transfer.amount.amount), - ) { - return reject(); - } + let out_length = transp_bundle.vout.len(); + if !(1..=4).contains(&out_length) { + debug_log!( + "Transparent output to a transaction to the masp must be \ + beteween 1 and 4 but is {}", + transp_bundle.vout.len() + ); + + return reject(); + } + let mut outs = transp_bundle.vout.iter(); + let mut valid_count = 0; + for denom in token::MaspDenom::iter() { + let out = match outs.next() { + Some(out) => out, + None => continue, + }; - let (_transp_asset, transp_amt) = convert_amount( + let expected_asset_type: AssetType = + asset_type_from_epoched_address( ctx.get_block_epoch().unwrap(), &transfer.token, - transfer.amount.amount, denom, ); - // Non-masp destinations subtract from transparent tx pool - transparent_tx_pool -= transp_amt; - - // Satisfies 4. - let target_enc = transfer - .target - .try_to_vec() - .expect("target address encoding"); - - let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); - - if <[u8; 20]>::from(hash) != out.address.0 { - debug_log!( - "the public key of the output account does not match \ - the transfer target" - ); - return reject(); - } - valid_count += 1; + // Satisfies 2. and 3. + if !valid_asset_type(&expected_asset_type, &out.asset_type) { + // we don't know which masp denoms are necessary apriori. + // This is encoded via the asset types. + continue; } - // one or more of the denoms in the batch failed to verify - // the asset derivation. - if valid_count != out_length { + if !valid_transfer_amount( + out.value, + denom.denominate(&transfer.amount.amount), + ) { return reject(); } - } else { - // Handle shielded output - // The following boundary conditions must be satisfied - // 1. Zero transparent output - // Satisfies 1. 
- if let Some(transp_bundle) = shielded_tx.transparent_bundle() { - if !transp_bundle.vout.is_empty() { - debug_log!( - "Transparent output to a transaction from the masp \ - must be 0 but is {}", - transp_bundle.vout.len() - ); - return reject(); - } - } - } + let (_transp_asset, transp_amt) = convert_amount( + ctx.get_block_epoch().unwrap(), + &transfer.token, + transfer.amount.amount, + denom, + ); - match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { - None | Some(Ordering::Less) => { + // Non-masp destinations subtract from transparent tx pool + transparent_tx_pool -= transp_amt; + + // Satisfies 4. + let target_enc = transfer + .target + .try_to_vec() + .expect("target address encoding"); + + let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); + + if <[u8; 20]>::from(hash) != out.address.0 { debug_log!( - "Transparent transaction value pool must be nonnegative. \ - Violation may be caused by transaction being constructed \ - in previous epoch. Maybe try again." + "the public key of the output account does not match the \ + transfer target" ); - // Section 3.4: The remaining value in the transparent - // transaction value pool MUST be nonnegative. return reject(); } - Some(Ordering::Greater) => { + valid_count += 1; + } + // one or more of the denoms in the batch failed to verify + // the asset derivation. + if valid_count != out_length { + return reject(); + } + } else { + // Handle shielded output + // The following boundary conditions must be satisfied + // 1. Zero transparent output + + // Satisfies 1. + if let Some(transp_bundle) = shielded_tx.transparent_bundle() { + if !transp_bundle.vout.is_empty() { debug_log!( - "Transaction fees cannot be paid inside MASP transaction." + "Transparent output to a transaction from the masp must \ + be 0 but is {}", + transp_bundle.vout.len() ); return reject(); } - _ => {} } - // Do the expensive proof verification in the VM at the end. 
- ctx.verify_masp(shielded_tx.try_to_vec().unwrap()) - } else { - reject() } + + match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { + None | Some(Ordering::Less) => { + debug_log!( + "Transparent transaction value pool must be nonnegative. \ + Violation may be caused by transaction being constructed in \ + previous epoch. Maybe try again." + ); + // Section 3.4: The remaining value in the transparent + // transaction value pool MUST be nonnegative. + return reject(); + } + Some(Ordering::Greater) => { + debug_log!( + "Transaction fees cannot be paid inside MASP transaction." + ); + return reject(); + } + _ => {} + } + // Do the expensive proof verification in the VM at the end. + ctx.verify_masp(shielded_tx.try_to_vec().unwrap()) } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 7298c0b126..2f85740028 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -176,9 +176,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -331,7 +328,7 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let vp_env = vp_host_env::take(); @@ -390,7 +387,7 @@ mod tests { let valid = solution.validate(tx::ctx(), address, target.clone()).unwrap(); assert!(valid); // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let mut vp_env = vp_host_env::take(); diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index a334576b53..25b17a14b4 100644 
--- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -259,9 +259,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -317,9 +314,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -379,9 +373,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -612,9 +603,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index f929a8a0d1..9c32d81717 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -266,9 +266,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -324,9 +321,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -386,9 +380,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -629,9 +620,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); From f35b779fe2a0907883f2b224d0f3a41325e1ecfd Mon Sep 17 00:00:00 2001 From: yito88 Date: Wed, 20 Sep 2023 16:01:30 +0200 Subject: [PATCH 041/161] add gen_ibc_shielded_transfer --- apps/src/lib/cli.rs | 97 +++++++++++++++++++++ apps/src/lib/cli/client.rs | 18 ++++ apps/src/lib/client/tx.rs | 33 ++++++++ benches/lib.rs | 53 +++--------- core/src/ledger/ibc/mod.rs | 43 +++++++++- core/src/ledger/storage_api/account.rs | 1 + core/src/ledger/storage_api/token.rs | 13 +-- core/src/types/address.rs | 31 +++++-- shared/src/ledger/protocol/mod.rs | 1 + shared/src/sdk/args.rs | 23 +++++ shared/src/sdk/masp.rs | 40 +++++---- shared/src/sdk/rpc.rs | 1 + shared/src/sdk/signing.rs | 47 ++++++----- shared/src/sdk/tx.rs | 111 ++++++++++++++++++++++++- 14 files changed, 410 insertions(+), 102 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 135ff1e3c5..9981cb06e9 100644 --- 
a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -255,6 +255,7 @@ pub mod cmds { .subcommand(QueryValidatorState::def().display_order(5)) // Actions .subcommand(SignTx::def().display_order(6)) + .subcommand(GenIbcShieldedTransafer::def().display_order(6)) // Utils .subcommand(Utils::def().display_order(7)) } @@ -313,6 +314,8 @@ pub mod cmds { let add_to_eth_bridge_pool = Self::parse_with_ctx(matches, AddToEthBridgePool); let sign_tx = Self::parse_with_ctx(matches, SignTx); + let gen_ibc_shielded = + Self::parse_with_ctx(matches, GenIbcShieldedTransafer); let utils = SubCmd::parse(matches).map(Self::WithoutContext); tx_custom .or(tx_transfer) @@ -350,6 +353,7 @@ pub mod cmds { .or(query_validator_state) .or(query_account) .or(sign_tx) + .or(gen_ibc_shielded) .or(utils) } } @@ -424,6 +428,7 @@ pub mod cmds { QueryPgf(QueryPgf), QueryValidatorState(QueryValidatorState), SignTx(SignTx), + GenIbcShieldedTransafer(GenIbcShieldedTransafer), } #[allow(clippy::large_enum_variant)] @@ -1874,6 +1879,29 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct GenIbcShieldedTransafer( + pub args::GenIbcShieldedTransafer, + ); + + impl SubCmd for GenIbcShieldedTransafer { + const CMD: &'static str = "ibc-gen-shielded"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + GenIbcShieldedTransafer(args::GenIbcShieldedTransafer::parse( + matches, + )) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Generate shielded transfer for IBC.") + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct EpochSleep(pub args::Query); @@ -2713,6 +2741,7 @@ pub mod args { pub const SAFE_MODE: ArgFlag = flag("safe-mode"); pub const SCHEME: ArgDefault = arg_default("scheme", DefaultFn(|| SchemeType::Ed25519)); + pub const SENDER: Arg = arg("sender"); pub const SIGNING_KEYS: ArgMulti = arg_multi("signing-keys"); pub const SIGNATURES: ArgMulti = arg_multi("signatures"); pub const SOURCE: Arg = arg("source"); @@ -4731,6 
+4760,74 @@ pub mod args { } } + impl CliToSdk> + for GenIbcShieldedTransafer + { + fn to_sdk( + self, + ctx: &mut Context, + ) -> GenIbcShieldedTransafer { + GenIbcShieldedTransafer:: { + query: self.query.to_sdk(ctx), + output_folder: self.output_folder, + sender: self.sender, + target: ctx.get(&self.target), + token: ctx.get(&self.token), + trace_path: self.trace_path, + amount: self.amount, + port_id: self.port_id, + channel_id: self.channel_id, + } + } + } + + impl Args for GenIbcShieldedTransafer { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let output_folder = OUTPUT_FOLDER_PATH.parse(matches); + let sender = SENDER.parse(matches); + let target = TRANSFER_TARGET.parse(matches); + let token = TOKEN.parse(matches); + let trace_path = TRACE_PATH.parse(matches); + let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); + let port_id = PORT_ID.parse(matches); + let channel_id = CHANNEL_ID.parse(matches); + Self { + query, + output_folder, + sender, + target, + token, + trace_path, + amount, + port_id, + channel_id, + } + } + + fn def(app: App) -> App { + app.add_args::>() + .arg(OUTPUT_FOLDER_PATH.def().help( + "The output folder path where the artifact will be stored.", + )) + .arg(SENDER.def().help("The foreign sender address.")) + .arg(TRANSFER_TARGET.def().help("The target address.")) + .arg(TOKEN.def().help("The transfer token.")) + .arg(TRACE_PATH.def().help("The IBC trace path of the token.")) + .arg(AMOUNT.def().help("The amount to transfer in decimal.")) + .arg( + PORT_ID + .def() + .help("The port ID via which the token is received."), + ) + .arg( + CHANNEL_ID.def().help( + "The channel ID via which the token is received.", + ), + ) + } + } + impl CliToSdk> for QueryCommissionRate { fn to_sdk(self, ctx: &mut Context) -> QueryCommissionRate { QueryCommissionRate:: { diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 1a7d9f534a..4ccfb438e0 100644 --- a/apps/src/lib/cli/client.rs +++ 
b/apps/src/lib/cli/client.rs @@ -660,6 +660,24 @@ impl CliApi { let args = args.to_sdk(&mut ctx); tx::sign_tx::<_, IO>(&client, &mut ctx, args).await?; } + Sub::GenIbcShieldedTransafer(GenIbcShieldedTransafer( + mut args, + )) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address( + &mut args.query.ledger_address, + ) + }); + client + .wait_until_node_is_synced::() + .await + .proceed_or_else(error)?; + let args = args.to_sdk(&mut ctx); + tx::gen_ibc_shielded_transfer::<_, IO>( + &client, &mut ctx, args, + ) + .await?; + } } } cli::NamadaClient::WithoutContext(cmd, global_args) => match cmd { diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..81ff927b7f 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -12,6 +12,7 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; +use namada::ibc::applications::transfer::Memo; use namada::ledger::pos; use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; @@ -1606,3 +1607,35 @@ pub async fn submit_tx( ) -> Result { tx::submit_tx::<_, IO>(client, to_broadcast).await } + +pub async fn gen_ibc_shielded_transfer( + client: &C, + ctx: &mut Context, + args: args::GenIbcShieldedTransafer, +) -> Result<(), error::Error> +where + C: namada::ledger::queries::Client + Sync, + C::Error: std::fmt::Display, +{ + if let Some(shielded_transfer) = tx::gen_ibc_shielded_transfer::<_, _, IO>( + client, + &mut ctx.shielded, + args.clone(), + ) + .await? 
+ { + let tx_id = shielded_transfer.masp_tx.txid().to_string(); + let filename = format!("ibc_shielded_transfer_{}.memo", tx_id,); + let output_path = match &args.output_folder { + Some(path) => path.join(filename), + None => filename.into(), + }; + let out = File::create(&output_path) + .expect("Should be able to create the out file."); + serde_json::to_writer_pretty(out, &Memo::from(shielded_transfer)) + .expect("IBC memo should be deserializable."); + } else { + eprintln!("No shielded transfer for this IBC transfer.") + } + Ok(()) +} diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..d48e45cb4a 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -89,9 +89,7 @@ use namada::types::time::DateTimeUtc; use namada::types::token::DenominatedAmount; use namada::types::transaction::governance::InitProposalData; use namada::types::transaction::pos::Bond; -use namada::types::transaction::GasLimit; use namada::vm::wasm::run; -use namada_apps::cli::args::{Tx as TxArgs, TxTransfer}; use namada_apps::cli::context::FromContext; use namada_apps::cli::Context; use namada_apps::config::TendermintMode; @@ -753,44 +751,10 @@ impl BenchShieldedCtx { source: TransferSource, target: TransferTarget, ) -> Tx { - let mock_args = TxArgs { - dry_run: false, - dry_run_wrapper: false, - dump_tx: false, - force: false, - broadcast_only: false, - ledger_address: (), - initialized_account_alias: None, - fee_amount: None, - fee_token: address::nam(), - fee_unshield: None, - gas_limit: GasLimit::from(u64::MAX), - expiration: None, - disposable_signing_key: false, - signing_keys: vec![defaults::albert_keypair()], - signatures: vec![], - wallet_alias_force: true, - chain_id: None, - tx_reveal_code_path: TX_REVEAL_PK_WASM.into(), - verification_key: None, - password: None, - wrapper_fee_payer: None, - output_folder: None, + let denominated_amount = DenominatedAmount { + amount, + denom: 0.into(), }; - - let args = TxTransfer { - tx: mock_args, - source: source.clone(), - target: 
target.clone(), - token: address::nam(), - amount: InputAmount::Validated(DenominatedAmount { - amount, - denom: 0.into(), - }), - native_token: self.shell.wl_storage.storage.native_token.clone(), - tx_code_path: TX_TRANSFER_WASM.into(), - }; - let async_runtime = tokio::runtime::Runtime::new().unwrap(); let spending_key = self .wallet @@ -804,10 +768,13 @@ impl BenchShieldedCtx { )) .unwrap(); let shielded = async_runtime - .block_on( - self.shielded - .gen_shielded_transfer::<_, DefaultIo>(&self.shell, args), - ) + .block_on(self.shielded.gen_shielded_transfer::<_, DefaultIo>( + &self.shell, + &source, + &target, + &address::nam(), + denominated_amount, + )) .unwrap() .map( |ShieldedTransfer { diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 05dec84b99..3b8edcc271 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -7,6 +7,7 @@ use std::cell::RefCell; use std::collections::HashMap; use std::fmt::Debug; use std::rc::Rc; +use std::str::FromStr; use std::time::Duration; pub use context::common::IbcCommonContext; @@ -18,11 +19,14 @@ use thiserror::Error; use crate::ibc::applications::transfer::error::TokenTransferError; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::{ - send_transfer_execute, send_transfer_validate, + is_receiver_chain_source, send_transfer_execute, send_transfer_validate, + BaseDenom, PrefixedDenom, TracePath, TracePrefix, }; use crate::ibc::core::ics04_channel::msgs::PacketMsg; use crate::ibc::core::ics23_commitment::specs::ProofSpecs; -use crate::ibc::core::ics24_host::identifier::{ChainId as IbcChainId, PortId}; +use crate::ibc::core::ics24_host::identifier::{ + ChainId as IbcChainId, ChannelId, PortId, +}; use crate::ibc::core::router::{Module, ModuleId, Router}; use crate::ibc::core::{execute, validate, MsgEnvelope, RouterError}; use crate::ibc_proto::google::protobuf::Any; @@ -299,3 +303,38 @@ pub struct ValidationParams { /// 
Upgrade path pub upgrade_path: Vec, } + +/// Get the IbcToken from the source/destination ports and channels +pub fn received_ibc_token( + token: &Address, + trace_path: Option, + src_port_id: &PortId, + src_channel_id: &ChannelId, + dest_port_id: &PortId, + dest_channel_id: &ChannelId, +) -> Result { + if let Some(trace_path) = trace_path { + let mut ibc_denom = PrefixedDenom { + trace_path, + base_denom: BaseDenom::from_str(&token.to_string()).map_err( + |e| Error::Denom(format!("Trace path is invalid: error {e}")), + )?, + }; + if is_receiver_chain_source( + src_port_id.clone(), + src_channel_id.clone(), + &ibc_denom, + ) { + let prefix = + TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); + ibc_denom.remove_trace_prefix(&prefix); + } else { + let prefix = + TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); + ibc_denom.add_trace_prefix(prefix); + } + Ok(storage::ibc_token(&ibc_denom.to_string())) + } else { + Ok(token.clone()) + } +} diff --git a/core/src/ledger/storage_api/account.rs b/core/src/ledger/storage_api/account.rs index 5fa0abb8f9..b3cdd5fb67 100644 --- a/core/src/ledger/storage_api/account.rs +++ b/core/src/ledger/storage_api/account.rs @@ -77,6 +77,7 @@ where } Address::Implicit(_) => Ok(true), Address::Internal(_) => Ok(false), + Address::Foreign(_) => Ok(false), } } diff --git a/core/src/ledger/storage_api/token.rs b/core/src/ledger/storage_api/token.rs index 02adcc32be..f22ca99280 100644 --- a/core/src/ledger/storage_api/token.rs +++ b/core/src/ledger/storage_api/token.rs @@ -46,19 +46,22 @@ pub fn read_denom( where S: StorageRead, { - let (key, nut) = match token { + let (key, is_default_zero) = match token { Address::Internal(InternalAddress::Nut(erc20)) => { let token = Address::Internal(InternalAddress::Erc20(*erc20)); + // NB: always use the equivalent ERC20's smallest + // denomination to specify amounts, if we cannot + // find a denom in storage (token::denom_key(&token), true) } + 
Address::Internal(InternalAddress::IbcToken(_)) => { + (token::denom_key(token), true) + } token => (token::denom_key(token), false), }; storage.read(&key).map(|opt_denom| { Some(opt_denom.unwrap_or_else(|| { - if nut { - // NB: always use the equivalent ERC20's smallest - // denomination to specify amounts, if we cannot - // find a denom in storage + if is_default_zero { 0u8.into() } else { // FIXME: perhaps when we take this branch, we should diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 1a18118e83..32cc7de99b 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -96,6 +96,8 @@ const PREFIX_ESTABLISHED: &str = "est"; const PREFIX_IMPLICIT: &str = "imp"; /// Fixed-length address strings prefix for internal addresses. const PREFIX_INTERNAL: &str = "ano"; +/// Fixed-length address strings prefix for foreign addresses. +const PREFIX_FOREIGN: &str = "for"; /// Fixed-length address strings prefix for IBC addresses. const PREFIX_IBC: &str = "ibc"; /// Fixed-length address strings prefix for Ethereum addresses. 
@@ -134,6 +136,8 @@ pub enum Address { Implicit(ImplicitAddress), /// An internal address represents a module with a native VP Internal(InternalAddress), + /// A foreign address is provided from other chains + Foreign(String), } // We're using the string format of addresses (bech32m) for ordering to ensure @@ -196,6 +200,7 @@ impl Address { Some(hash_hex) } Address::Internal(_) => None, + Address::Foreign(_) => None, } } @@ -254,6 +259,9 @@ impl Address { debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string } + Address::Foreign(addr) => { + format!("{}::{}", PREFIX_FOREIGN, addr) + } } .into_bytes(); string.resize(FIXED_LEN_STRING_BYTES, b' '); @@ -374,6 +382,9 @@ impl Address { "Invalid ERC20 internal address".to_string(), )), }, + Some((PREFIX_FOREIGN, raw)) => { + Ok(Address::Foreign(raw.to_string())) + } _ => Err(DecodeError::InvalidInnerEncoding( ErrorKind::InvalidData, "Invalid address prefix".to_string(), @@ -397,6 +408,9 @@ impl Address { Address::Internal(kind) => { format!("Internal {}: {}", kind, self.encode()) } + Address::Foreign(_) => { + format!("Foreign: {}", self.encode()) + } } } } @@ -445,17 +459,22 @@ impl FromStr for Address { } } -/// Convert the IBC signer to an address for the IBC transfer +/// for IBC signer impl TryFrom for Address { type Error = DecodeError; fn try_from(signer: Signer) -> Result { - // When IBC transfer, the address in this signer should be an address or - // a payment address. If it's a payment address, this returns - // the masp address. 
Address::decode(signer.as_ref()).or( - crate::types::masp::PaymentAddress::from_str(signer.as_ref()) - .and(Ok(masp())), + match crate::types::masp::PaymentAddress::from_str(signer.as_ref()) + { + Ok(_) => Ok(masp()), + Err(_) => Err(DecodeError::InvalidInnerEncodingStr(format!( + "Invalid address for IBC transfer: {signer}" + ))), + }, ) } } diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index a23b026eea..c1a518e023 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -973,6 +973,7 @@ where accepted } + Address::Foreign(_) => Ok(true), }; // Returning error from here will short-circuit the VP parallel diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 0b87317530..87f01d2e90 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -885,3 +885,26 @@ pub struct ValidatorSetUpdateRelay { /// Ethereum transfers aren't canceled midway through. pub safe_mode: bool, } + +/// IBC shielded transfer generation arguments +#[derive(Clone, Debug)] +pub struct GenIbcShieldedTransafer { + /// The query parameters. 
+ pub query: Query, + /// The output directory path to where serialize the data + pub output_folder: Option, + /// The foreign sender address + pub sender: String, + /// The target address + pub target: C::TransferTarget, + /// The token address + pub token: C::Address, + /// The trace path of the token + pub trace_path: Option, + /// Transferred token amount + pub amount: InputAmount, + /// Port ID via which the token is received + pub port_id: PortId, + /// Channel ID via which the token is received + pub channel_id: ChannelId, +} diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 739f941b9a..08cd0844a7 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -59,17 +59,19 @@ use sha2::Digest; use thiserror::Error; use crate::proto::Tx; -use crate::sdk::args::InputAmount; use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; use crate::sdk::queries::Client; +use crate::sdk::rpc; use crate::sdk::rpc::{query_conversion, query_storage_value}; use crate::sdk::tx::decode_component; -use crate::sdk::{args, rpc}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::address::{masp, Address}; use crate::types::io::Io; -use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; +use crate::types::masp::{ + BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, + TransferTarget, +}; use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token; use crate::types::token::{ @@ -1486,7 +1488,10 @@ impl ShieldedContext { pub async fn gen_shielded_transfer( &mut self, client: &C, - args: args::TxTransfer, + source: &TransferSource, + target: &TransferTarget, + token: &Address, + amount: token::DenominatedAmount, ) -> Result, TransferErr> { // No shielded components are needed when neither source nor destination // are shielded @@ -1496,8 +1501,8 @@ impl ShieldedContext { use rand::rngs::StdRng; use rand_core::SeedableRng; - let 
spending_key = args.source.spending_key(); - let payment_address = args.target.payment_address(); + let spending_key = source.spending_key(); + let payment_address = target.payment_address(); // No shielded components are needed when neither source nor // destination are shielded if spending_key.is_none() && payment_address.is_none() { @@ -1540,14 +1545,9 @@ impl ShieldedContext { let mut builder = Builder::::new_with_rng(NETWORK, 1.into(), rng); - // break up a transfer into a number of transfers with suitable - // denominations - let InputAmount::Validated(amt) = args.amount else { - unreachable!("The function `gen_shielded_transfer` is only called by `submit_tx` which validates amounts.") - }; // Convert transaction amount into MASP types - let (asset_types, amount) = - convert_amount(epoch, &args.token, amt.amount)?; + let (asset_types, masp_amount) = + convert_amount(epoch, token, amount.amount)?; // If there are shielded inputs if let Some(sk) = spending_key { @@ -1556,7 +1556,7 @@ impl ShieldedContext { .collect_unspent_notes::<_, IO>( client, &to_viewing_key(&sk).vk, - I128Sum::from_sum(amount), + I128Sum::from_sum(masp_amount), epoch, ) .await?; @@ -1582,8 +1582,7 @@ impl ShieldedContext { // We add a dummy UTXO to our transaction, but only the source of // the parent Transfer object is used to validate fund // availability - let source_enc = args - .source + let source_enc = source .address() .ok_or_else(|| { Error::Other( @@ -1605,7 +1604,7 @@ impl ShieldedContext { builder .add_transparent_input(TxOut { asset_type: *asset_type, - value: denom.denominate(&amt), + value: denom.denominate(&amount), address: script, }) .map_err(builder::Error::TransparentBuild)?; @@ -1623,7 +1622,7 @@ impl ShieldedContext { ovk_opt, pa.into(), *asset_type, - denom.denominate(&amt), + denom.denominate(&amount), memo.clone(), ) .map_err(builder::Error::SaplingBuild)?; @@ -1631,8 +1630,7 @@ impl ShieldedContext { } else { // Embed the transparent target address into the 
shielded // transaction so that it can be signed - let target_enc = args - .target + let target_enc = target .address() .ok_or_else(|| { Error::Other( @@ -1650,7 +1648,7 @@ impl ShieldedContext { )); for (denom, asset_type) in MaspDenom::iter().zip(asset_types.iter()) { - let vout = denom.denominate(&amt); + let vout = denom.denominate(&amount); if vout != 0 { builder .add_transparent_output( diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index d98e0b61ec..5c69d743d9 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -217,6 +217,7 @@ pub async fn known_address( query_has_storage_key(client, &key).await } Address::Implicit(_) | Address::Internal(_) => Ok(true), + Address::Foreign(_) => Ok(false), } } diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..48695e9069 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -1,6 +1,5 @@ //! Functions to sign transactions use std::collections::{BTreeMap, HashMap}; -use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; use data_encoding::HEXLOWER; @@ -121,6 +120,10 @@ pub async fn find_pk< "Internal address {} doesn't have any signing keys.", addr )), + Address::Foreign(_) => other_err(format!( + "Foreign address {} doesn't have any signing keys.", + addr + )), } } @@ -275,7 +278,8 @@ pub async fn aux_signing_data< Some(AccountPublicKeysMap::from_iter(public_keys.clone())), 1u8, ), - Some(owner @ Address::Internal(_)) => { + Some(owner @ Address::Internal(_)) + | Some(owner @ Address::Foreign(_)) => { return Err(Error::from(TxError::InvalidAccount(owner.encode()))); } None => (None, 0u8), @@ -419,30 +423,27 @@ pub async fn wrap_tx< Some(diff) if !diff.is_zero() => { if let Some(spending_key) = args.fee_unshield.clone() { // Unshield funds for fee payment - let transfer_args = args::TxTransfer { - tx: args.to_owned(), - source: spending_key, - target: namada_core::types::masp::TransferTarget::Address( - fee_payer_address.clone(), - 
), - token: args.fee_token.clone(), - amount: args::InputAmount::Validated(DenominatedAmount { - // NOTE: must unshield the total fee amount, not the - // diff, because the ledger evaluates the transaction in - // reverse (wrapper first, inner second) and cannot know - // ahead of time if the inner will modify the balance of - // the gas payer - amount: total_fee, - denom: 0.into(), - }), - // These last two fields are not used in the function, mock - // them - native_token: args.fee_token.clone(), - tx_code_path: PathBuf::new(), + let target = namada_core::types::masp::TransferTarget::Address( + fee_payer_address.clone(), + ); + let fee_amount = DenominatedAmount { + // NOTE: must unshield the total fee amount, not the + // diff, because the ledger evaluates the transaction in + // reverse (wrapper first, inner second) and cannot know + // ahead of time if the inner will modify the balance of + // the gas payer + amount: total_fee, + denom: 0.into(), }; match shielded - .gen_shielded_transfer::<_, IO>(client, transfer_args) + .gen_shielded_transfer::<_, IO>( + client, + &spending_key, + &target, + &args.fee_token, + fee_amount, + ) .await { Ok(Some(ShieldedTransfer { diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index e95ab58fe1..c9c61991c7 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -22,6 +22,7 @@ use namada_core::ledger::governance::cli::onchain::{ }; use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; +use namada_core::ledger::ibc::storage::channel_key; use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{masp, Address}; use namada_core::types::dec::Dec; @@ -38,6 +39,7 @@ use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::packet::PacketData; use crate::ibc::applications::transfer::PrefixedCoin; use 
crate::ibc::core::ics04_channel::timeout::TimeoutHeight; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; @@ -55,9 +57,10 @@ use crate::sdk::wallet::{Wallet, WalletUtils}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; use crate::types::control_flow::{time, ProceedOrElse}; +use crate::types::ibc::IbcShieldedTransfer; use crate::types::io::Io; use crate::types::key::*; -use crate::types::masp::TransferTarget; +use crate::types::masp::{TransferSource, TransferTarget}; use crate::types::storage::Epoch; use crate::types::time::DateTimeUtc; use crate::types::transaction::account::{InitAccount, UpdateAccount}; @@ -1714,7 +1717,13 @@ pub async fn build_transfer< // Construct the shielded part of the transaction, if any let stx_result = shielded - .gen_shielded_transfer::<_, IO>(client, args.clone()) + .gen_shielded_transfer::<_, IO>( + client, + &args.source, + &args.target, + &args.token, + validated_amount, + ) .await; let shielded_parts = match stx_result { @@ -1997,6 +2006,104 @@ pub async fn build_custom< Ok((tx, epoch)) } +/// Generate IBC shielded transfer +pub async fn gen_ibc_shielded_transfer< + C: crate::ledger::queries::Client + Sync, + V: ShieldedUtils, + IO: Io, +>( + client: &C, + shielded: &mut ShieldedContext, + args: args::GenIbcShieldedTransafer, +) -> Result> { + let key = match args.target.payment_address() { + Some(pa) if pa.is_pinned() => Some(pa.hash()), + Some(_) => None, + None => return Ok(None), + }; + let source = Address::Foreign(args.sender.clone()); + let (src_port_id, src_channel_id) = + get_ibc_src_port_channel(client, &args.port_id, &args.channel_id) + .await?; + let token = namada_core::ledger::ibc::received_ibc_token( + &args.token, + args.trace_path, + &src_port_id, + &src_channel_id, + &args.port_id, + &args.channel_id, + 
) + .map_err(|e| { + Error::Other(format!("Getting IBC Token failed: error {e}")) + })?; + let validated_amount = + validate_amount::<_, IO>(client, args.amount, &token, false) + .await + .expect("expected to validate amount"); + + let shielded = shielded + .gen_shielded_transfer::<_, IO>( + client, + &TransferSource::Address(source.clone()), + &args.target, + &token, + validated_amount, + ) + .await + .map_err(|err| TxError::MaspError(err.to_string()))?; + let transfer = token::Transfer { + source: source.clone(), + target: masp(), + token, + amount: validated_amount, + key, + shielded: None, + }; + if let Some(shielded) = shielded { + Ok(Some(IbcShieldedTransfer { + transfer, + masp_tx: shielded.masp_tx, + })) + } else { + Ok(None) + } +} + +async fn get_ibc_src_port_channel( + client: &C, + dest_port_id: &PortId, + dest_channel_id: &ChannelId, +) -> Result<(PortId, ChannelId)> { + use crate::ibc::core::ics04_channel::channel::ChannelEnd; + use crate::ibc_proto::protobuf::Protobuf; + + let channel_key = channel_key(dest_port_id, dest_channel_id); + match rpc::query_storage_value_bytes::(client, &channel_key, None, false) + .await + { + Ok((Some(bytes), _)) => { + let channel = ChannelEnd::decode_vec(&bytes).map_err(|_| { + Error::Other(format!( + "Decoding channel end failed: port {}, channel {}", + dest_port_id, dest_channel_id + )) + })?; + if let Some(src_channel) = channel.remote.channel_id() { + Ok((channel.remote.port_id.clone(), src_channel.clone())) + } else { + Err(Error::Other(format!( + "The source channel doesn't exist: port {dest_port_id}, \ + channel {dest_channel_id}" + ))) + } + } + _ => Err(Error::Other(format!( + "Reading channel end failed: port {dest_port_id}, channel \ + {dest_channel_id}" + ))), + } +} + async fn expect_dry_broadcast< C: crate::ledger::queries::Client + Sync, IO: Io, From 0e1850739aaf2fc8562d02863b59f82dee13fcbc Mon Sep 17 00:00:00 2001 From: yito88 Date: Wed, 20 Sep 2023 23:41:23 +0200 Subject: [PATCH 042/161] WIP: add e2e 
test for receiving to payment address --- apps/src/lib/cli.rs | 17 ++- apps/src/lib/client/tx.rs | 10 +- benches/lib.rs | 1 - core/src/ledger/ibc/mod.rs | 65 ++++++------ core/src/types/ibc.rs | 8 +- shared/src/sdk/args.rs | 2 - shared/src/sdk/tx.rs | 5 +- tests/src/e2e/ibc_tests.rs | 212 ++++++++++++++++++++++++++++++++----- 8 files changed, 239 insertions(+), 81 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 9981cb06e9..2980b21e49 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2687,7 +2687,7 @@ pub mod args { pub const HD_WALLET_DERIVATION_PATH_OPT: ArgOpt = HD_WALLET_DERIVATION_PATH.opt(); pub const HISTORIC: ArgFlag = flag("historic"); - pub const IBC_TRANSFER_MEMO: ArgOpt = arg_opt("memo"); + pub const IBC_TRANSFER_MEMO_PATH: ArgOpt = arg_opt("memo-path"); pub const LEDGER_ADDRESS_ABOUT: &str = "Address of a ledger node as \"{scheme}://{host}:{port}\". If the \ scheme is not supplied, it is assumed to be TCP."; @@ -3603,7 +3603,10 @@ pub mod args { let channel_id = CHANNEL_ID.parse(matches); let timeout_height = TIMEOUT_HEIGHT.parse(matches); let timeout_sec_offset = TIMEOUT_SEC_OFFSET.parse(matches); - let memo = IBC_TRANSFER_MEMO.parse(matches); + let memo = IBC_TRANSFER_MEMO_PATH.parse(matches).map(|path| { + std::fs::read_to_string(path) + .expect("Expected a file at given path") + }); let tx_code_path = PathBuf::from(TX_IBC_WASM); Self { tx, @@ -3640,9 +3643,9 @@ pub mod args { ) .arg(TIMEOUT_SEC_OFFSET.def().help("The timeout as seconds.")) .arg( - IBC_TRANSFER_MEMO + IBC_TRANSFER_MEMO_PATH .def() - .help("Memo field of ICS20 transfer."), + .help("The path for the memo field of ICS20 transfer."), ) } } @@ -4773,7 +4776,6 @@ pub mod args { sender: self.sender, target: ctx.get(&self.target), token: ctx.get(&self.token), - trace_path: self.trace_path, amount: self.amount, port_id: self.port_id, channel_id: self.channel_id, @@ -4788,7 +4790,6 @@ pub mod args { let sender = SENDER.parse(matches); let target = 
TRANSFER_TARGET.parse(matches); let token = TOKEN.parse(matches); - let trace_path = TRACE_PATH.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let port_id = PORT_ID.parse(matches); let channel_id = CHANNEL_ID.parse(matches); @@ -4798,7 +4799,6 @@ pub mod args { sender, target, token, - trace_path, amount, port_id, channel_id, @@ -4806,14 +4806,13 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::>() + app.add_args::>() .arg(OUTPUT_FOLDER_PATH.def().help( "The output folder path where the artifact will be stored.", )) .arg(SENDER.def().help("The foreign sender address.")) .arg(TRANSFER_TARGET.def().help("The target address.")) .arg(TOKEN.def().help("The transfer token.")) - .arg(TRACE_PATH.def().help("The IBC trace path of the token.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) .arg( PORT_ID diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 81ff927b7f..9e2ae749b0 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1625,15 +1625,19 @@ where .await? 
{ let tx_id = shielded_transfer.masp_tx.txid().to_string(); - let filename = format!("ibc_shielded_transfer_{}.memo", tx_id,); + let filename = format!("ibc_shielded_transfer_{}.memo", tx_id); let output_path = match &args.output_folder { Some(path) => path.join(filename), None => filename.into(), }; - let out = File::create(&output_path) + let mut out = File::create(&output_path) .expect("Should be able to create the out file."); - serde_json::to_writer_pretty(out, &Memo::from(shielded_transfer)) + out.write_all(Memo::from(shielded_transfer).as_ref().as_bytes()) .expect("IBC memo should be deserializable."); + println!( + "Output IBC shielded transfer for {tx_id} to {}", + output_path.to_string_lossy() + ); } else { eprintln!("No shielded transfer for this IBC transfer.") } diff --git a/benches/lib.rs b/benches/lib.rs index d48e45cb4a..287d6683b0 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -71,7 +71,6 @@ use namada::ledger::queries::{ use namada::ledger::storage_api::StorageRead; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; -use namada::sdk::args::InputAmount; use namada::sdk::masp::{ self, ShieldedContext, ShieldedTransfer, ShieldedUtils, }; diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 3b8edcc271..277a19011b 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -20,7 +20,7 @@ use crate::ibc::applications::transfer::error::TokenTransferError; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::{ is_receiver_chain_source, send_transfer_execute, send_transfer_validate, - BaseDenom, PrefixedDenom, TracePath, TracePrefix, + PrefixedDenom, TracePrefix, }; use crate::ibc::core::ics04_channel::msgs::PacketMsg; use crate::ibc::core::ics23_commitment::specs::ProofSpecs; @@ -30,12 +30,13 @@ use crate::ibc::core::ics24_host::identifier::{ use crate::ibc::core::router::{Module, ModuleId, Router}; use 
crate::ibc::core::{execute, validate, MsgEnvelope, RouterError}; use crate::ibc_proto::google::protobuf::Any; -use crate::types::address::Address; +use crate::types::address::{masp, Address}; use crate::types::chain::ChainId; use crate::types::ibc::{ get_shielded_transfer, is_ibc_denom, EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, }; +use crate::types::masp::PaymentAddress; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -211,14 +212,20 @@ where .as_ref() .and_then(|event| event.attributes.get("receiver")) { - Some(receiver) => { - Some(Address::decode(receiver).map_err(|_| { - Error::Denom(format!( - "Decoding the receiver address failed: {:?}", - receive_event - )) - })?) - } + Some(receiver) => Some( + Address::decode(receiver) + .or_else(|_| { + // Replace it with MASP address when the receiver is a + // payment address + PaymentAddress::from_str(receiver).map(|_| masp()) + }) + .map_err(|_| { + Error::Denom(format!( + "Decoding the receiver address failed: {:?}", + receive_event + )) + })?, + ), None => None, }; let denom_event = self @@ -306,35 +313,25 @@ pub struct ValidationParams { /// Get the IbcToken from the source/destination ports and channels pub fn received_ibc_token( - token: &Address, - trace_path: Option, + ibc_denom: &PrefixedDenom, src_port_id: &PortId, src_channel_id: &ChannelId, dest_port_id: &PortId, dest_channel_id: &ChannelId, ) -> Result { - if let Some(trace_path) = trace_path { - let mut ibc_denom = PrefixedDenom { - trace_path, - base_denom: BaseDenom::from_str(&token.to_string()).map_err( - |e| Error::Denom(format!("Trace path is invalid: error {e}")), - )?, - }; - if is_receiver_chain_source( - src_port_id.clone(), - src_channel_id.clone(), - &ibc_denom, - ) { - let prefix = - TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); - ibc_denom.remove_trace_prefix(&prefix); - } else { - let prefix = - TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); - ibc_denom.add_trace_prefix(prefix); - } - 
Ok(storage::ibc_token(&ibc_denom.to_string())) + let mut ibc_denom = ibc_denom.clone(); + if is_receiver_chain_source( + src_port_id.clone(), + src_channel_id.clone(), + &ibc_denom, + ) { + let prefix = + TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); + ibc_denom.remove_trace_prefix(&prefix); } else { - Ok(token.clone()) + let prefix = + TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); + ibc_denom.add_trace_prefix(prefix); } + Ok(storage::ibc_token(ibc_denom.to_string())) } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 3e7ae3c4fc..6ff3c911f2 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -65,7 +65,7 @@ mod ibc_rs_conversion { use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; - use data_encoding::HEXLOWER; + use data_encoding::HEXUPPER; use thiserror::Error; use super::{IbcEvent, IbcShieldedTransfer}; @@ -126,7 +126,7 @@ mod ibc_rs_conversion { fn from(shielded: IbcShieldedTransfer) -> Self { let bytes = shielded.try_to_vec().expect("Encoding shouldn't failed"); - HEXLOWER.encode(&bytes).into() + HEXUPPER.encode(&bytes).into() } } @@ -134,7 +134,7 @@ mod ibc_rs_conversion { type Error = Error; fn try_from(memo: Memo) -> Result { - let bytes = HEXLOWER + let bytes = HEXUPPER .decode(memo.as_ref().as_bytes()) .map_err(Error::DecodingHex)?; Self::try_from_slice(&bytes) @@ -154,7 +154,7 @@ mod ibc_rs_conversion { event.attributes.get("success") == Some(&"true".to_string()); let receiver = event.attributes.get("receiver"); let is_shielded = if let Some(receiver) = receiver { - PaymentAddress::from_str(&receiver).is_ok() + PaymentAddress::from_str(receiver).is_ok() } else { false }; diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 87f01d2e90..ff031d8f62 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -899,8 +899,6 @@ pub struct GenIbcShieldedTransafer { pub target: C::TransferTarget, /// The token address pub token: C::Address, - /// The trace 
path of the token - pub trace_path: Option, /// Transferred token amount pub amount: InputAmount, /// Port ID via which the token is received diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index c9c61991c7..56b05395a6 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -2025,9 +2025,10 @@ pub async fn gen_ibc_shielded_transfer< let (src_port_id, src_channel_id) = get_ibc_src_port_channel(client, &args.port_id, &args.channel_id) .await?; + let ibc_denom = + rpc::query_ibc_denom::<_, IO>(client, &args.token, Some(&source)).await; let token = namada_core::ledger::ibc::received_ibc_token( - &args.token, - args.trace_path, + &ibc_denom.parse().expect("Invalid IBC denom"), &src_port_id, &src_channel_id, &args.port_id, diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 3cd2ba48a3..e5f91b8c7c 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -13,6 +13,7 @@ use core::convert::TryFrom; use core::str::FromStr; use core::time::Duration; use std::collections::HashMap; +use std::path::PathBuf; use color_eyre::eyre::Result; use eyre::eyre; @@ -60,12 +61,14 @@ use namada::ledger::storage::traits::Sha256Hasher; use namada::tendermint::abci::Event as AbciEvent; use namada::tendermint::block::Height as TmHeight; use namada::types::address::{Address, InternalAddress}; +use namada::types::io::DefaultIo; use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key}; use namada::types::token::Amount; use namada_apps::client::rpc::{ query_storage_value, query_storage_value_bytes, }; +use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::client::utils::id_from_pk; use namada_apps::config::ethereum_bridge; use namada_apps::config::genesis::genesis_config::GenesisConfig; @@ -77,9 +80,10 @@ use prost::Message; use setup::constants::*; use tendermint_light_client::components::io::{Io, ProdIo as TmLightClientIo}; -use super::helpers::wait_for_wasm_pre_compile; use 
super::setup::set_ethereum_bridge_mode; -use crate::e2e::helpers::{find_address, get_actor_rpc, get_validator_pk}; +use crate::e2e::helpers::{ + find_address, get_actor_rpc, get_validator_pk, wait_for_wasm_pre_compile, +}; use crate::e2e::setup::{self, sleep, Bin, NamadaCmd, Test, Who}; use crate::{run, run_as}; @@ -125,10 +129,6 @@ fn run_ledger_ibc() -> Result<()> { wait_for_wasm_pre_compile(&mut ledger_a)?; wait_for_wasm_pre_compile(&mut ledger_b)?; - // Wait for a first block - ledger_a.exp_string("Committed block hash")?; - ledger_b.exp_string("Committed block hash")?; - let _bg_ledger_a = ledger_a.background(); let _bg_ledger_b = ledger_b.background(); @@ -190,6 +190,18 @@ fn run_ledger_ibc() -> Result<()> { // The balance should not be changed check_balances_after_back(&port_id_b, &channel_id_b, &test_a, &test_b)?; + shielded_transfer( + &test_a, + &test_b, + &client_id_a, + &client_id_b, + &port_id_a, + &channel_id_a, + &port_id_b, + &channel_id_b, + )?; + check_shielded_balances(&port_id_b, &channel_id_b, &test_b)?; + // Skip tests for closing a channel and timeout_on_close since the transfer // channel cannot be closed @@ -197,13 +209,18 @@ fn run_ledger_ibc() -> Result<()> { } fn setup_two_single_node_nets() -> Result<(Test, Test)> { + // Download the shielded pool parameters before starting node + let _ = CLIShieldedUtils::new::(PathBuf::new()); + // epoch per 100 seconds let update_genesis = |mut genesis: GenesisConfig| { - genesis.parameters.epochs_per_year = 315_360; + genesis.parameters.epochs_per_year = 31536; + genesis.parameters.min_num_of_blocks = 1; genesis }; let update_genesis_b = |mut genesis: GenesisConfig| { - genesis.parameters.epochs_per_year = 315_360; + genesis.parameters.epochs_per_year = 31536; + genesis.parameters.min_num_of_blocks = 1; setup::set_validators(1, genesis, |_| setup::ANOTHER_CHAIN_PORT_OFFSET) }; Ok(( @@ -635,7 +652,7 @@ fn transfer_token( let height = transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, 
"100000", ALBERT_KEY, @@ -643,6 +660,7 @@ fn transfer_token( channel_id_a, None, None, + None, false, )?; let events = get_events(test_a, height)?; @@ -704,13 +722,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10.1", ALBERT_KEY, port_id_a, channel_id_a, None, + None, Some("The amount for the IBC transfer should be an integer"), false, )?; @@ -719,13 +738,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10", ALBERT_KEY, &"port".parse().unwrap(), channel_id_a, None, + None, Some("Error trying to apply a transaction"), false, )?; @@ -734,13 +754,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10", ALBERT_KEY, port_id_a, &"channel-42".parse().unwrap(), None, + None, Some("Error trying to apply a transaction"), false, )?; @@ -795,7 +816,7 @@ fn transfer_back( let height = transfer( test_b, BERTHA, - &receiver, + receiver.to_string(), ibc_denom, "50000", BERTHA_KEY, @@ -803,6 +824,7 @@ fn transfer_back( channel_id_b, None, None, + None, false, )?; let events = get_events(test_b, height)?; @@ -858,12 +880,13 @@ fn transfer_timeout( let height = transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "100000", ALBERT_KEY, port_id_a, channel_id_a, + None, Some(Duration::new(5, 0)), None, false, @@ -893,6 +916,116 @@ fn transfer_timeout( Ok(()) } +#[allow(clippy::too_many_arguments)] +fn shielded_transfer( + test_a: &Test, + test_b: &Test, + client_id_a: &ClientId, + client_id_b: &ClientId, + port_id_a: &PortId, + channel_id_a: &ChannelId, + port_id_b: &PortId, + channel_id_b: &ChannelId, +) -> Result<()> { + // Get masp proof for the following IBC transfer from the destination chain + // It will send 10 BTC from Chain A to PA(B) on Chain B + let sender = find_address(test_a, ALBERT)?; + let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let output_folder = test_b.test_dir.path().to_string_lossy(); + 
let amount = Amount::native_whole(10).to_string_native(); + let args = [ + "ibc-gen-shielded", + "--output-folder-path", + &output_folder, + "--sender", + &sender.to_string(), + "--target", + AB_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + &amount, + "--port-id", + port_id_b.as_ref(), + "--channel-id", + channel_id_b.as_ref(), + "--node", + &rpc_b, + ]; + let mut client = run!(test_b, Bin::Client, args, Some(120))?; + let file_path = get_shielded_transfer_path(&mut client)?; + client.assert_success(); + + // Send a token from Chain A to PA(B) on Chain B + let amount = Amount::native_whole(10).to_string_native(); + let height = transfer( + test_a, + ALBERT, + AB_PAYMENT_ADDRESS, + BTC, + amount, + ALBERT_KEY, + port_id_a, + channel_id_a, + Some(&file_path.to_string_lossy()), + None, + None, + false, + )?; + let events = get_events(test_a, height)?; + let packet = + get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; + + let height_a = query_height(test_a)?; + let proof_commitment_on_a = + get_commitment_proof(test_a, &packet, height_a)?; + let msg = MsgRecvPacket { + packet, + proof_commitment_on_a, + proof_height_on_a: height_a, + signer: signer(), + }; + // Update the client state of Chain A on Chain B + update_client_with_height(test_a, test_b, client_id_b, height_a)?; + // Receive the token on Chain B + let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + let events = get_events(test_b, height)?; + let packet = + get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let ack = + get_ack_from_events(&events).ok_or(eyre!("Transaction failed"))?; + check_ibc_packet_query( + test_b, + &"write_acknowledgement".parse().unwrap(), + &packet, + )?; + + // get the proof on Chain B + let height_b = query_height(test_b)?; + let proof_acked_on_b = get_ack_proof(test_b, &packet, height_b)?; + let msg = MsgAcknowledgement { + packet, + 
acknowledgement: ack.try_into().expect("invalid ack"), + proof_acked_on_b, + proof_height_on_b: height_b, + signer: signer(), + }; + // Update the client state of Chain B on Chain A + update_client_with_height(test_b, test_a, client_id_a, height_b)?; + // Acknowledge on Chain A + submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + + Ok(()) +} + +fn get_shielded_transfer_path(client: &mut NamadaCmd) -> Result { + let (_unread, matched) = + client.exp_regex("Output IBC shielded transfer .*")?; + let file_path = matched.trim().split(' ').last().expect("invalid output"); + Ok(PathBuf::from_str(file_path).expect("invalid file path")) +} + fn get_commitment_proof( test: &Test, packet: &Packet, @@ -987,19 +1120,19 @@ fn submit_ibc_tx( fn transfer( test: &Test, sender: impl AsRef, - receiver: &Address, + receiver: impl AsRef, token: impl AsRef, amount: impl AsRef, signer: impl AsRef, port_id: &PortId, channel_id: &ChannelId, + memo: Option<&str>, timeout_sec: Option, expected_err: Option<&str>, wait_reveal_pk: bool, ) -> Result { let rpc = get_actor_rpc(test, &Who::Validator(0)); - let receiver = receiver.to_string(); let channel_id = channel_id.to_string(); let port_id = port_id.to_string(); let mut tx_args = vec![ @@ -1007,7 +1140,7 @@ fn transfer( "--source", sender.as_ref(), "--receiver", - &receiver, + receiver.as_ref(), "--signing-keys", signer.as_ref(), "--token", @@ -1022,13 +1155,19 @@ fn transfer( &rpc, ]; + let memo_path = memo.unwrap_or_default(); + if memo.is_some() { + tx_args.push("--memo-path"); + tx_args.push(memo_path); + } + let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); tx_args.push(&timeout); } - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + let mut client = run!(test, Bin::Client, tx_args, Some(300))?; match expected_err { Some(err) => { client.exp_string(err)?; @@ -1124,10 +1263,7 @@ fn check_ibc_update_query( client_id, &consensus_height, 
)) { - Ok(Some(event)) => { - println!("Found the update event: {:?}", event); - Ok(()) - } + Ok(Some(_)) => Ok(()), Ok(None) => Err(eyre!("No update event for the client {}", client_id)), Err(e) => Err(eyre!("IBC update event query failed: {}", e)), } @@ -1150,10 +1286,7 @@ fn check_ibc_packet_query( &packet.chan_id_on_b, &packet.seq_on_a, )) { - Ok(Some(event)) => { - println!("Found the packet event: {:?}", event); - Ok(()) - } + Ok(Some(_)) => Ok(()), Ok(None) => Err(eyre!("No packet event for the packet {}", packet)), Err(e) => Err(eyre!("IBC packet event query failed: {}", e)), } @@ -1297,6 +1430,33 @@ fn check_balances_after_back( Ok(()) } +/// Check balances after IBC shielded transfer +fn check_shielded_balances( + dest_port_id: &PortId, + dest_channel_id: &ChannelId, + test_b: &Test, +) -> Result<()> { + // Check the balance on Chain B + let token = find_address(test_b, BTC)?; + let denom = format!("{}/{}/{}", dest_port_id, dest_channel_id, &token); + let ibc_token = ibc_token(denom).to_string(); + let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let query_args = vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + &ibc_token, + "--node", + &rpc_b, + ]; + let expected = format!("{}: 10", ibc_token); + let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; + client.exp_string(&expected)?; + client.assert_success(); + Ok(()) +} + fn signer() -> Signer { "signer".to_string().into() } From 94e6c2d53a09e17005b984c0b4dfe623a1ba5357 Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 22 Sep 2023 22:18:41 +0200 Subject: [PATCH 043/161] workaround for decoding asset types for IbcToken --- shared/src/sdk/tx.rs | 25 ++++++++++++++++++++----- tests/src/e2e/ibc_tests.rs | 1 + 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 56b05395a6..773ccbd635 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -47,7 +47,9 @@ use crate::proto::{MaspBuilder, Tx}; use 
crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; use crate::sdk::masp::TransferErr::Build; -use crate::sdk::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; +use crate::sdk::masp::{ + make_asset_type, ShieldedContext, ShieldedTransfer, ShieldedUtils, +}; use crate::sdk::rpc::{ self, format_denominated_amount, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, @@ -2042,7 +2044,7 @@ pub async fn gen_ibc_shielded_transfer< .await .expect("expected to validate amount"); - let shielded = shielded + let shielded_transfer = shielded .gen_shielded_transfer::<_, IO>( client, &TransferSource::Address(source.clone()), @@ -2052,18 +2054,31 @@ pub async fn gen_ibc_shielded_transfer< ) .await .map_err(|err| TxError::MaspError(err.to_string()))?; + let transfer = token::Transfer { source: source.clone(), target: masp(), - token, + token: token.clone(), amount: validated_amount, key, shielded: None, }; - if let Some(shielded) = shielded { + if let Some(shielded_transfer) = shielded_transfer { + // TODO: Workaround for decoding the asset_type later + let mut asset_types = Vec::new(); + for denom in MaspDenom::iter() { + let epoch = shielded_transfer.epoch; + let asset_type = make_asset_type(Some(epoch), &token, denom)?; + shielded + .asset_types + .insert(asset_type, (token.clone(), denom, epoch)); + asset_types.push(asset_type); + } + let _ = shielded.save().await; + Ok(Some(IbcShieldedTransfer { transfer, - masp_tx: shielded.masp_tx, + masp_tx: shielded_transfer.masp_tx, })) } else { Ok(None) diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index e5f91b8c7c..01f71cb953 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -1447,6 +1447,7 @@ fn check_shielded_balances( AB_VIEWING_KEY, "--token", &ibc_token, + "--no-conversions", "--node", &rpc_b, ]; From c8f84261b1f839345046e10d498845eb9e624546 Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 29 Sep 
2023 15:14:29 +0200 Subject: [PATCH 044/161] fix after merge --- core/src/ledger/ibc/mod.rs | 12 +++++------- tests/src/e2e/ibc_tests.rs | 7 ++----- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 277a19011b..c997e88c3b 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -159,11 +159,7 @@ where // denomination is also set for the minting. self.ctx .borrow_mut() - .store_ibc_denom( - &receiver.to_string(), - &trace_hash, - &ibc_denom, - ) + .store_ibc_denom(&receiver, &trace_hash, &ibc_denom) .map_err(|e| { Error::Denom(format!( "Writing the IBC denom failed: {}", @@ -203,7 +199,7 @@ where /// events fn get_minted_token_info( &self, - ) -> Result, Error> { + ) -> Result, Error> { let receive_event = self.ctx.borrow().get_ibc_event(EVENT_TYPE_PACKET).map_err( |_| Error::Denom("Reading the IBC event failed".to_string()), @@ -212,6 +208,7 @@ where .as_ref() .and_then(|event| event.attributes.get("receiver")) { + // Check the receiver address Some(receiver) => Some( Address::decode(receiver) .or_else(|_| { @@ -224,7 +221,8 @@ where "Decoding the receiver address failed: {:?}", receive_event )) - })?, + })? 
+ .to_string(), ), None => None, }; diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 01f71cb953..7eb99c7e44 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -1437,21 +1437,18 @@ fn check_shielded_balances( test_b: &Test, ) -> Result<()> { // Check the balance on Chain B - let token = find_address(test_b, BTC)?; - let denom = format!("{}/{}/{}", dest_port_id, dest_channel_id, &token); - let ibc_token = ibc_token(denom).to_string(); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ "balance", "--owner", AB_VIEWING_KEY, "--token", - &ibc_token, + BTC, "--no-conversions", "--node", &rpc_b, ]; - let expected = format!("{}: 10", ibc_token); + let expected = format!("{}/{}/btc: 10", dest_port_id, dest_channel_id); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); From 27f4e0a177923d1da3233e53bf897910538f4c4a Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 29 Sep 2023 21:08:20 +0200 Subject: [PATCH 045/161] fix get_shielded_action --- core/src/ledger/ibc/mod.rs | 2 +- core/src/ledger/vp_env.rs | 60 ++++++++++++++++++-------------------- core/src/types/ibc.rs | 4 +-- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index c997e88c3b..c4db5fae54 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -270,7 +270,7 @@ where let event = self .ctx .borrow() - .get_ibc_event("fungible_token_packet") + .get_ibc_event(EVENT_TYPE_PACKET) .map_err(|_| { Error::MaspTx( "Reading the IBC event failed".to_string(), diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index c62cf2982d..5bac346b48 100644 --- a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -8,7 +8,7 @@ use super::storage_api::{self, OptionExt, ResultExt, StorageRead}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; 
-use crate::types::ibc::{get_shielded_transfer, IbcEvent}; +use crate::types::ibc::{get_shielded_transfer, IbcEvent, EVENT_TYPE_PACKET}; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; @@ -112,37 +112,35 @@ where tx_data: Tx, ) -> Result<(Transfer, Transaction), storage_api::Error> { let signed = tx_data; - match Transfer::try_from_slice(&signed.data().unwrap()[..]) { - Ok(transfer) => { - let shielded_hash = transfer - .shielded - .ok_or_err_msg("unable to find shielded hash")?; - let masp_tx = signed - .get_section(&shielded_hash) - .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg("unable to find shielded section")?; - Ok((transfer, masp_tx)) - } - Err(_) => { - if let Some(event) = - self.get_ibc_event("fungible_token_packet".to_string())? - { - if let Some(shielded) = - get_shielded_transfer(&event).into_storage_result()? - { - Ok((shielded.transfer, shielded.masp_tx)) - } else { - Err(storage_api::Error::new_const( - "No shielded transfer in the IBC event", - )) - } - } else { - Err(storage_api::Error::new_const( - "No IBC event for the shielded action", - )) - } - } + if let Ok(transfer) = + Transfer::try_from_slice(&signed.data().unwrap()[..]) + { + let shielded_hash = transfer + .shielded + .ok_or_err_msg("unable to find shielded hash")?; + let masp_tx = signed + .get_section(&shielded_hash) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg("unable to find shielded section")?; + return Ok((transfer, masp_tx)); } + + // Shielded transfer over IBC + let event = self + .get_ibc_event(EVENT_TYPE_PACKET.to_string())? + .ok_or_else(|| { + storage_api::Error::new_const( + "No IBC event for the shielded action", + ) + })?; + get_shielded_transfer(&event) + .into_storage_result()? 
+ .map(|shielded| (shielded.transfer, shielded.masp_tx)) + .ok_or_else(|| { + storage_api::Error::new_const( + "No shielded transfer in the IBC event", + ) + }) } /// Verify a MASP transaction diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 6ff3c911f2..daf736df46 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -68,7 +68,7 @@ mod ibc_rs_conversion { use data_encoding::HEXUPPER; use thiserror::Error; - use super::{IbcEvent, IbcShieldedTransfer}; + use super::{IbcEvent, IbcShieldedTransfer, EVENT_TYPE_PACKET}; use crate::ibc::applications::transfer::{Memo, PrefixedDenom, TracePath}; use crate::ibc::core::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, @@ -146,7 +146,7 @@ mod ibc_rs_conversion { pub fn get_shielded_transfer( event: &IbcEvent, ) -> Result> { - if event.event_type != "fungible_token_packet" { + if event.event_type != EVENT_TYPE_PACKET { // This event is not for receiving a token return Ok(None); } From 68064109a072f1f3b91a8b0f940a2ef2f1dd534f Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 29 Sep 2023 23:29:43 +0200 Subject: [PATCH 046/161] remove Address::Foreign --- apps/src/lib/cli.rs | 4 --- core/src/ledger/storage_api/account.rs | 1 - core/src/types/address.rs | 14 -------- shared/src/ledger/protocol/mod.rs | 1 - shared/src/sdk/args.rs | 2 -- shared/src/sdk/rpc.rs | 1 - shared/src/sdk/signing.rs | 7 +--- shared/src/sdk/tx.rs | 50 ++++++++++++++------------ tests/src/e2e/ibc_tests.rs | 3 -- 9 files changed, 28 insertions(+), 55 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 2980b21e49..66c1f73c3a 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -4773,7 +4773,6 @@ pub mod args { GenIbcShieldedTransafer:: { query: self.query.to_sdk(ctx), output_folder: self.output_folder, - sender: self.sender, target: ctx.get(&self.target), token: ctx.get(&self.token), amount: self.amount, @@ -4787,7 +4786,6 @@ pub mod args { fn parse(matches: &ArgMatches) -> Self { let query = 
Query::parse(matches); let output_folder = OUTPUT_FOLDER_PATH.parse(matches); - let sender = SENDER.parse(matches); let target = TRANSFER_TARGET.parse(matches); let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); @@ -4796,7 +4794,6 @@ pub mod args { Self { query, output_folder, - sender, target, token, amount, @@ -4810,7 +4807,6 @@ pub mod args { .arg(OUTPUT_FOLDER_PATH.def().help( "The output folder path where the artifact will be stored.", )) - .arg(SENDER.def().help("The foreign sender address.")) .arg(TRANSFER_TARGET.def().help("The target address.")) .arg(TOKEN.def().help("The transfer token.")) .arg(AMOUNT.def().help("The amount to transfer in decimal.")) diff --git a/core/src/ledger/storage_api/account.rs b/core/src/ledger/storage_api/account.rs index b3cdd5fb67..5fa0abb8f9 100644 --- a/core/src/ledger/storage_api/account.rs +++ b/core/src/ledger/storage_api/account.rs @@ -77,7 +77,6 @@ where } Address::Implicit(_) => Ok(true), Address::Internal(_) => Ok(false), - Address::Foreign(_) => Ok(false), } } diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 32cc7de99b..ecb0670904 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -96,8 +96,6 @@ const PREFIX_ESTABLISHED: &str = "est"; const PREFIX_IMPLICIT: &str = "imp"; /// Fixed-length address strings prefix for internal addresses. const PREFIX_INTERNAL: &str = "ano"; -/// Fixed-length address strings prefix for foreign addresses. -const PREFIX_FOREIGN: &str = "for"; /// Fixed-length address strings prefix for IBC addresses. const PREFIX_IBC: &str = "ibc"; /// Fixed-length address strings prefix for Ethereum addresses. 
@@ -136,8 +134,6 @@ pub enum Address { Implicit(ImplicitAddress), /// An internal address represents a module with a native VP Internal(InternalAddress), - /// An foreign address is provided from other chains - Foreign(String), } // We're using the string format of addresses (bech32m) for ordering to ensure @@ -200,7 +196,6 @@ impl Address { Some(hash_hex) } Address::Internal(_) => None, - Address::Foreign(_) => None, } } @@ -259,9 +254,6 @@ impl Address { debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string } - Address::Foreign(addr) => { - format!("{}::{}", PREFIX_FOREIGN, addr) - } } .into_bytes(); string.resize(FIXED_LEN_STRING_BYTES, b' '); @@ -382,9 +374,6 @@ impl Address { "Invalid ERC20 internal address".to_string(), )), }, - Some((PREFIX_FOREIGN, raw)) => { - Ok(Address::Foreign(raw.to_string())) - } _ => Err(DecodeError::InvalidInnerEncoding( ErrorKind::InvalidData, "Invalid address prefix".to_string(), @@ -408,9 +397,6 @@ impl Address { Address::Internal(kind) => { format!("Internal {}: {}", kind, self.encode()) } - Address::Foreign(_) => { - format!("Foreign: {}", self.encode()) - } } } } diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index c1a518e023..a23b026eea 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -973,7 +973,6 @@ where accepted } - Address::Foreign(_) => Ok(true), }; // Returning error from here will short-circuit the VP parallel diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index ff031d8f62..df782ca497 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -893,8 +893,6 @@ pub struct GenIbcShieldedTransafer { pub query: Query, /// The output directory path to where serialize the data pub output_folder: Option, - /// The foreign sender address - pub sender: String, /// The target address pub target: C::TransferTarget, /// The token address diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 5c69d743d9..d98e0b61ec 
100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -217,7 +217,6 @@ pub async fn known_address( query_has_storage_key(client, &key).await } Address::Implicit(_) | Address::Internal(_) => Ok(true), - Address::Foreign(_) => Ok(false), } } diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 48695e9069..5ff3376c23 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -120,10 +120,6 @@ pub async fn find_pk< "Internal address {} doesn't have any signing keys.", addr )), - Address::Foreign(_) => other_err(format!( - "Foreign address {} doesn't have any signing keys.", - addr - )), } } @@ -278,8 +274,7 @@ pub async fn aux_signing_data< Some(AccountPublicKeysMap::from_iter(public_keys.clone())), 1u8, ), - Some(owner @ Address::Internal(_)) - | Some(owner @ Address::Foreign(_)) => { + Some(owner @ Address::Internal(_)) => { return Err(Error::from(TxError::InvalidAccount(owner.encode()))); } None => (None, 0u8), diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 773ccbd635..16aa79170a 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -24,7 +24,7 @@ use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; use namada_core::ledger::ibc::storage::channel_key; use namada_core::ledger::pgf::cli::steward::Commission; -use namada_core::types::address::{masp, Address}; +use namada_core::types::address::{masp, Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; use namada_core::types::token::MaspDenom; @@ -2023,7 +2023,7 @@ pub async fn gen_ibc_shielded_transfer< Some(_) => None, None => return Ok(None), }; - let source = Address::Foreign(args.sender.clone()); + let source = Address::Internal(InternalAddress::Ibc); let (src_port_id, src_channel_id) = get_ibc_src_port_channel(client, &args.port_id, &args.channel_id) .await?; @@ -2094,30 +2094,34 @@ async fn 
get_ibc_src_port_channel( use crate::ibc_proto::protobuf::Protobuf; let channel_key = channel_key(dest_port_id, dest_channel_id); - match rpc::query_storage_value_bytes::(client, &channel_key, None, false) - .await - { - Ok((Some(bytes), _)) => { - let channel = ChannelEnd::decode_vec(&bytes).map_err(|_| { + let bytes = + rpc::query_storage_value_bytes::(client, &channel_key, None, false) + .await? + .0 + .ok_or_else(|| { Error::Other(format!( - "Decoding channel end failed: port {}, channel {}", - dest_port_id, dest_channel_id + "No channel end: port {dest_port_id}, channel \ + {dest_channel_id}" )) })?; - if let Some(src_channel) = channel.remote.channel_id() { - Ok((channel.remote.port_id.clone(), src_channel.clone())) - } else { - Err(Error::Other(format!( - "The source channel doesn't exist: port {dest_port_id}, \ - channel {dest_channel_id}" - ))) - } - } - _ => Err(Error::Other(format!( - "Reading channel end failed: port {dest_port_id}, channel \ - {dest_channel_id}" - ))), - } + let channel = ChannelEnd::decode_vec(&bytes).map_err(|_| { + Error::Other(format!( + "Decoding channel end failed: port {dest_port_id}, channel \ + {dest_channel_id}", + )) + })?; + channel + .remote + .channel_id() + .map(|src_channel| { + (channel.remote.port_id.clone(), src_channel.clone()) + }) + .ok_or_else(|| { + Error::Other(format!( + "The source channel doesn't exist: port {dest_port_id}, \ + channel {dest_channel_id}" + )) + }) } async fn expect_dry_broadcast< diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 7eb99c7e44..584974c61a 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -929,7 +929,6 @@ fn shielded_transfer( ) -> Result<()> { // Get masp proof for the following IBC transfer from the destination chain // It will send 10 BTC from Chain A to PA(B) on Chain B - let sender = find_address(test_a, ALBERT)?; let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let output_folder = 
test_b.test_dir.path().to_string_lossy(); let amount = Amount::native_whole(10).to_string_native(); @@ -937,8 +936,6 @@ fn shielded_transfer( "ibc-gen-shielded", "--output-folder-path", &output_folder, - "--sender", - &sender.to_string(), "--target", AB_PAYMENT_ADDRESS, "--token", From 7c7ee029a1e91cd21c41e3c8633a0d4224075b69 Mon Sep 17 00:00:00 2001 From: brentstone Date: Mon, 2 Oct 2023 14:04:50 -0600 Subject: [PATCH 047/161] keep validator eth keys for max proposal period --- proof_of_stake/src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index d3ac47ac29..c7d666bdd3 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -44,14 +44,14 @@ pub type ValidatorConsensusKeys = crate::epoched::Epoched< pub type ValidatorEthHotKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, - crate::epoched::OffsetDefaultNumPastEpochs, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched validator's eth cold key. pub type ValidatorEthColdKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, - crate::epoched::OffsetDefaultNumPastEpochs, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched validator's state. 
From 85770beec8bd02d7fb0686611b926192e3c55c81 Mon Sep 17 00:00:00 2001 From: brentstone Date: Mon, 2 Oct 2023 14:05:29 -0600 Subject: [PATCH 048/161] clear old enqueued slashes when processing slashes --- proof_of_stake/src/epoched.rs | 25 +++++++++++++++++++++++++ proof_of_stake/src/lib.rs | 3 +++ proof_of_stake/src/types.rs | 2 +- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 8b86f4dbd4..0f66a36d84 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -877,6 +877,29 @@ impl EpochOffset for OffsetSlashProcessingLen { } } +/// Offset at the slash processing delay plus the default num past epochs. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetSlashProcessingLenPlus; +impl EpochOffset for OffsetSlashProcessingLenPlus { + fn value(params: &PosParams) -> u64 { + params.slash_processing_epoch_offset() + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::SlashProcessingLenPlus + } +} + /// Maximum offset. #[derive( Debug, @@ -1018,6 +1041,8 @@ pub enum DynEpochOffset { /// Offset at slash processing delay (unbonding + /// cubic_slashing_window + 1). 
SlashProcessingLen, + /// Offset at slash processing delay plus the defaul num past epochs + SlashProcessingLenPlus, /// Offset at the max proposal period MaxProposalPeriod, /// Offset at the max proposal period plus the default num past epochs diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 6f39fc6527..223abdc1fe 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -3689,6 +3689,9 @@ where cur_slashes.push(updated_slash); } + // Update the epochs of enqueued slashes in storage + enqueued_slashes_handle().update_data(storage, ¶ms, current_epoch)?; + let mut deltas_for_update: HashMap> = HashMap::new(); diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index c7d666bdd3..68a89c86cb 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -140,7 +140,7 @@ pub type ValidatorSlashes = NestedMap; pub type EpochedSlashes = crate::epoched::NestedEpoched< ValidatorSlashes, crate::epoched::OffsetUnbondingLen, - crate::epoched::OffsetSlashProcessingLen, + crate::epoched::OffsetSlashProcessingLenPlus, >; /// Epoched validator's unbonds From 59d84653729264b86847304f3b6d4dd69af7356b Mon Sep 17 00:00:00 2001 From: brentstone Date: Mon, 2 Oct 2023 14:07:09 -0600 Subject: [PATCH 049/161] keep `total_deltas` for same period of time as `validator_deltas` --- proof_of_stake/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 68a89c86cb..3cc24ffbbb 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -104,7 +104,7 @@ pub type ValidatorDeltas = crate::epoched::EpochedDelta< pub type TotalDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - crate::epoched::OffsetSlashProcessingLen, + crate::epoched::OffsetMaxProposalPeriodOrSlashProcessingLenPlus, >; /// Epoched validator commission rate From 6656f1b276563998475d511e36ff6b8619ea7e57 Mon Sep 17 
00:00:00 2001 From: yito88 Date: Mon, 2 Oct 2023 22:41:16 +0200 Subject: [PATCH 050/161] fix get_ibc_event --- core/src/ledger/ibc/context/storage.rs | 6 +- core/src/ledger/ibc/mod.rs | 26 ++++++--- core/src/ledger/tx_env.rs | 6 +- core/src/ledger/vp_env.rs | 22 +++---- shared/src/ledger/native_vp/ibc/context.rs | 20 +++---- shared/src/ledger/native_vp/mod.rs | 6 +- shared/src/ledger/vp_host_fns.rs | 16 +++--- shared/src/vm/host_env.rs | 67 +++++++++++----------- shared/src/vm/wasm/host_env.rs | 4 +- tests/src/vm_host_env/tx.rs | 2 +- tx_prelude/src/ibc.rs | 6 +- tx_prelude/src/lib.rs | 14 ++--- vm_env/src/lib.rs | 6 +- vp_prelude/src/lib.rs | 13 +++-- 14 files changed, 110 insertions(+), 104 deletions(-) diff --git a/core/src/ledger/ibc/context/storage.rs b/core/src/ledger/ibc/context/storage.rs index ef8b61116d..ef24cd94f2 100644 --- a/core/src/ledger/ibc/context/storage.rs +++ b/core/src/ledger/ibc/context/storage.rs @@ -55,11 +55,11 @@ pub trait IbcStorageContext { /// Emit an IBC event fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), Self::Error>; - /// Get an IBC event - fn get_ibc_event( + /// Get IBC events + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error>; + ) -> Result, Self::Error>; /// Transfer token fn transfer_token( diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index c4db5fae54..c78afeabd6 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -200,11 +200,16 @@ where fn get_minted_token_info( &self, ) -> Result, Error> { - let receive_event = - self.ctx.borrow().get_ibc_event(EVENT_TYPE_PACKET).map_err( - |_| Error::Denom("Reading the IBC event failed".to_string()), - )?; + let receive_event = self + .ctx + .borrow() + .get_ibc_events(EVENT_TYPE_PACKET) + .map_err(|_| { + Error::Denom("Reading the IBC event failed".to_string()) + })?; + // The receiving event should be only one in the single IBC transaction let receiver = match receive_event + .first() .as_ref() 
.and_then(|event| event.attributes.get("receiver")) { @@ -229,11 +234,12 @@ where let denom_event = self .ctx .borrow() - .get_ibc_event(EVENT_TYPE_DENOM_TRACE) + .get_ibc_events(EVENT_TYPE_DENOM_TRACE) .map_err(|_| { Error::Denom("Reading the IBC event failed".to_string()) })?; - Ok(denom_event.as_ref().and_then(|event| { + // The denom event should be only one in the single IBC transaction + Ok(denom_event.first().as_ref().and_then(|event| { let trace_hash = event.attributes.get("trace_hash").cloned()?; let denom = event.attributes.get("denom").cloned()?; Some((trace_hash, denom, receiver?)) @@ -270,14 +276,16 @@ where let event = self .ctx .borrow() - .get_ibc_event(EVENT_TYPE_PACKET) + .get_ibc_events(EVENT_TYPE_PACKET) .map_err(|_| { Error::MaspTx( "Reading the IBC event failed".to_string(), ) })?; - match event { - Some(event) => get_shielded_transfer(&event) + // The receiving event should be only one in the single IBC + // transaction + match event.first() { + Some(event) => get_shielded_transfer(event) .map_err(|e| Error::MaspTx(e.to_string()))?, None => return Ok(()), } diff --git a/core/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs index 61b7824e64..ad8b23e60c 100644 --- a/core/src/ledger/tx_env.rs +++ b/core/src/ledger/tx_env.rs @@ -59,9 +59,9 @@ pub trait TxEnv: StorageRead + StorageWrite { /// Request to charge the provided amount of gas for the current transaction fn charge_gas(&mut self, used_gas: u64) -> Result<(), storage_api::Error>; - /// Get an IBC event with a event type - fn get_ibc_event( + /// Get IBC events with a event type + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, storage_api::Error>; + ) -> Result, storage_api::Error>; } diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index 5bac346b48..1241adfed7 100644 --- a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -78,11 +78,11 @@ where /// Get the address of the native token. 
fn get_native_token(&self) -> Result; - /// Get the IBC event. - fn get_ibc_event( + /// Get the IBC events. + fn get_ibc_events( &self, event_type: String, - ) -> Result, storage_api::Error>; + ) -> Result, storage_api::Error>; /// Storage prefix iterator, ordered by storage keys. It will try to get an /// iterator from the storage. @@ -126,14 +126,14 @@ where } // Shielded transfer over IBC - let event = self - .get_ibc_event(EVENT_TYPE_PACKET.to_string())? - .ok_or_else(|| { - storage_api::Error::new_const( - "No IBC event for the shielded action", - ) - })?; - get_shielded_transfer(&event) + let events = self.get_ibc_events(EVENT_TYPE_PACKET.to_string())?; + // The receiving event should be only one in the single IBC transaction + let event = events.first().ok_or_else(|| { + storage_api::Error::new_const( + "No IBC event for the shielded action", + ) + })?; + get_shielded_transfer(event) .into_storage_result()? .map(|shielded| (shielded.transfer, shielded.masp_tx)) .ok_or_else(|| { diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index 93784b8054..0043737818 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -124,16 +124,16 @@ where Ok(()) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { - for event in &self.event { - if event.event_type == *event_type.as_ref() { - return Ok(Some(event.clone())); - } - } - Ok(None) + ) -> Result, Self::Error> { + Ok(self + .event + .iter() + .filter(|event| event.event_type == *event_type.as_ref()) + .cloned() + .collect()) } fn transfer_token( @@ -381,10 +381,10 @@ where unimplemented!("Validation doesn't emit an event") } - fn get_ibc_event( + fn get_ibc_events( &self, _event_type: impl AsRef, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { unimplemented!("Validation doesn't get an event") } diff --git a/shared/src/ledger/native_vp/mod.rs 
b/shared/src/ledger/native_vp/mod.rs index 002c256eb1..a75543718c 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -450,11 +450,11 @@ where .into_storage_result() } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: String, - ) -> Result, storage_api::Error> { - vp_host_fns::get_ibc_event( + ) -> Result, storage_api::Error> { + vp_host_fns::get_ibc_events( &mut self.gas_meter.borrow_mut(), self.write_log, event_type, diff --git a/shared/src/ledger/vp_host_fns.rs b/shared/src/ledger/vp_host_fns.rs index c8e1bd12d7..ebe32d4f72 100644 --- a/shared/src/ledger/vp_host_fns.rs +++ b/shared/src/ledger/vp_host_fns.rs @@ -335,17 +335,17 @@ where } /// Getting the IBC event. -pub fn get_ibc_event( +pub fn get_ibc_events( _gas_meter: &mut VpGasMeter, write_log: &WriteLog, event_type: String, -) -> EnvResult> { - for event in write_log.get_ibc_events() { - if event.event_type == event_type { - return Ok(Some(event.clone())); - } - } - Ok(None) +) -> EnvResult> { + Ok(write_log + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type) + .cloned() + .collect()) } /// Storage prefix iterator for prior state (before tx execution), ordered by diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index fd4129501d..65bcce69b0 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -977,7 +977,7 @@ where } /// Getting an IBC event function exposed to the wasm VM Tx environment. 
-pub fn tx_get_ibc_event( +pub fn tx_get_ibc_events( env: &TxVmEnv, event_type_ptr: u64, event_type_len: u64, @@ -994,20 +994,20 @@ where .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas(env, gas)?; let write_log = unsafe { env.ctx.write_log.get() }; - for event in write_log.get_ibc_events() { - if event.event_type == event_type { - let value = - event.try_to_vec().map_err(TxRuntimeError::EncodingError)?; - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value); - return Ok(len); - } - } - Ok(HostEnvResult::Fail.to_i64()) + let events: Vec = write_log + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type) + .cloned() + .collect(); + let value = events.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let len: i64 = value + .len() + .try_into() + .map_err(TxRuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) } /// Storage read prior state (before tx execution) function exposed to the wasm @@ -1781,7 +1781,7 @@ where } /// Getting the IBC event function exposed to the wasm VM VP environment. -pub fn vp_get_ibc_event( +pub fn vp_get_ibc_events( env: &VpVmEnv, event_type_ptr: u64, event_type_len: u64, @@ -1801,21 +1801,17 @@ where vp_host_fns::add_gas(gas_meter, gas)?; let write_log = unsafe { env.ctx.write_log.get() }; - match vp_host_fns::get_ibc_event(gas_meter, write_log, event_type)? 
{ - Some(event) => { - let value = event - .try_to_vec() - .map_err(vp_host_fns::RuntimeError::EncodingError)?; - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value); - Ok(len) - } - None => Ok(HostEnvResult::Fail.to_i64()), - } + let events = vp_host_fns::get_ibc_events(gas_meter, write_log, event_type)?; + let value = events + .try_to_vec() + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let len: i64 = value + .len() + .try_into() + .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) } /// Verify a transaction signature @@ -2245,16 +2241,17 @@ where ibc_tx_charge_gas(self, gas) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { let write_log = unsafe { self.write_log.get() }; Ok(write_log .get_ibc_events() .iter() - .find(|event| event.event_type == event_type.as_ref()) - .cloned()) + .filter(|event| event.event_type == event_type.as_ref()) + .cloned() + .collect()) } fn transfer_token( diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index e5ebce6ee2..e9a63e631e 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -76,7 +76,7 @@ where "namada_tx_update_validity_predicate" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_validity_predicate), "namada_tx_init_account" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_init_account), "namada_tx_emit_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_emit_ibc_event), - "namada_tx_get_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_ibc_event), + "namada_tx_get_ibc_events" => 
Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_ibc_events), "namada_tx_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_chain_id), "namada_tx_get_tx_index" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_tx_index), "namada_tx_get_block_height" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_height), @@ -127,7 +127,7 @@ where "namada_vp_get_block_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_hash), "namada_vp_get_tx_code_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_code_hash), "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), - "namada_vp_get_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_event), + "namada_vp_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_events), "namada_vp_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_tx_section_signature), "namada_vp_verify_masp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_masp), "namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 582b668b79..ad425d09fd 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -447,7 +447,7 @@ mod native_tx_host_env { result_ptr: u64 )); native_host_fn!(tx_emit_ibc_event(event_ptr: u64, event_len: u64)); - native_host_fn!(tx_get_ibc_event(event_type_ptr: u64, event_type_len: u64) -> i64); + native_host_fn!(tx_get_ibc_events(event_type_ptr: u64, event_type_len: u64) -> i64); native_host_fn!(tx_get_chain_id(result_ptr: u64)); native_host_fn!(tx_get_block_height() -> u64); native_host_fn!(tx_get_tx_index() -> 
u32); diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs index 5e8e9522fa..2f19c24a82 100644 --- a/tx_prelude/src/ibc.rs +++ b/tx_prelude/src/ibc.rs @@ -75,11 +75,11 @@ impl IbcStorageContext for Ctx { ::emit_ibc_event(self, &event) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { - ::get_ibc_event(self, &event_type) + ) -> Result, Self::Error> { + ::get_ibc_events(self, &event_type) } fn transfer_token( diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 87d91e07a3..8ec36a7b30 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -331,23 +331,21 @@ impl TxEnv for Ctx { Ok(()) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Error> { + ) -> Result, Error> { let event_type = event_type.as_ref().to_string(); let read_result = unsafe { - namada_tx_get_ibc_event( + namada_tx_get_ibc_events( event_type.as_ptr() as _, event_type.len() as _, ) }; match read_from_buffer(read_result, namada_tx_result_buffer) { - Some(value) => Ok(Some( - ibc::IbcEvent::try_from_slice(&value[..]) - .expect("The conversion shouldn't fail"), - )), - None => Ok(None), + Some(value) => Ok(Vec::::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail")), + None => Ok(Vec::new()), } } } diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index edf00f847c..baadd56182 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -78,8 +78,8 @@ pub mod tx { // Emit an IBC event pub fn namada_tx_emit_ibc_event(event_ptr: u64, event_len: u64); - // Get an IBC event - pub fn namada_tx_get_ibc_event( + // Get IBC events + pub fn namada_tx_get_ibc_events( event_type_ptr: u64, event_type_len: u64, ) -> i64; @@ -198,7 +198,7 @@ pub mod vp { pub fn namada_vp_get_native_token(result_ptr: u64); // Get the IBC event - pub fn namada_vp_get_ibc_event( + pub fn namada_vp_get_ibc_events( event_type_ptr: u64, event_type_len: u64, ) -> i64; diff --git a/vp_prelude/src/lib.rs 
b/vp_prelude/src/lib.rs index 9857574acd..60618cfacf 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -297,18 +297,21 @@ impl<'view> VpEnv<'view> for Ctx { get_native_token() } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: String, - ) -> Result, Error> { + ) -> Result, Error> { let read_result = unsafe { - namada_vp_get_ibc_event( + namada_vp_get_ibc_events( event_type.as_ptr() as _, event_type.len() as _, ) }; - Ok(read_from_buffer(read_result, namada_vp_result_buffer) - .and_then(|t| ibc::IbcEvent::try_from_slice(&t[..]).ok())) + match read_from_buffer(read_result, namada_vp_result_buffer) { + Some(value) => Ok(Vec::::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail")), + None => Ok(Vec::new()), + } } fn iter_prefix<'iter>( From e471b6c17f6751575ee03eca5864506fd6490c26 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 27 Sep 2023 17:18:57 +0200 Subject: [PATCH 051/161] Moved FsShieldedUtils into the SDK behind a feature flag. --- apps/src/lib/cli/context.rs | 6 +- apps/src/lib/client/tx.rs | 197 +----- benches/lib.rs | 18 +- shared/src/ledger/eth_bridge/bridge_pool.rs | 12 +- shared/src/ledger/mod.rs | 80 ++- shared/src/sdk/args.rs | 688 +++++++++++++++----- shared/src/sdk/masp.rs | 138 +++- shared/src/sdk/signing.rs | 49 +- shared/src/sdk/tx.rs | 299 +++++---- tests/src/e2e/ledger_tests.rs | 7 +- tests/src/integration/masp.rs | 11 +- 11 files changed, 969 insertions(+), 536 deletions(-) diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 4aac8b1026..f2efec7fee 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -8,6 +8,7 @@ use std::str::FromStr; use color_eyre::eyre::Result; use namada::sdk::masp::ShieldedContext; use namada::sdk::wallet::Wallet; +use namada::sdk::masp::fs::FsShieldedUtils; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; @@ -16,7 +17,6 @@ use 
namada::types::key::*; use namada::types::masp::*; use super::args; -use crate::client::tx::CLIShieldedUtils; #[cfg(any(test, feature = "dev"))] use crate::config::genesis; use crate::config::genesis::genesis_config; @@ -78,7 +78,7 @@ pub struct Context { /// The ledger configuration for a specific chain ID pub config: Config, /// The context fr shielded operations - pub shielded: ShieldedContext, + pub shielded: ShieldedContext, /// Native token's address pub native_token: Address, } @@ -145,7 +145,7 @@ impl Context { wallet, global_config, config, - shielded: CLIShieldedUtils::new::(chain_dir), + shielded: FsShieldedUtils::new(chain_dir), native_token, }) } diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 4b2aa9b865..1289492faa 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1,11 +1,5 @@ -use std::env; -use std::fmt::Debug; -use std::fs::{File, OpenOptions}; -use std::io::{Read, Write}; -use std::path::PathBuf; - -use borsh::{BorshDeserialize, BorshSerialize}; -use masp_proofs::prover::LocalTxProver; +use std::fs::File; + use namada::core::ledger::governance::cli::offline::{ OfflineProposal, OfflineSignedProposal, OfflineVote, }; @@ -13,11 +7,12 @@ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; use namada::ledger::pos; -use namada::proof_of_stake::parameters::PosParams; -use namada::proto::Tx; use namada::sdk::rpc::{TxBroadcastData, TxResponse}; use namada::sdk::wallet::{Wallet, WalletUtils}; -use namada::sdk::{error, masp, signing, tx}; +use namada::ledger::{Namada, NamadaImpl}; +use namada::proof_of_stake::parameters::PosParams; +use namada::proto::Tx; +use namada::sdk::{error, signing, tx}; use namada::tendermint_rpc::HttpClient; use namada::types::address::{Address, ImplicitAddress}; use namada::types::dec::Dec; @@ -36,8 +31,6 @@ use crate::node::ledger::tendermint_node; use crate::wallet::{ gen_validator_keys, 
read_and_confirm_encryption_password, CliWalletUtils, }; -use namada::ledger::NamadaImpl; -use namada::ledger::Namada; use namada::types::io::StdIo; /// Wrapper around `signing::aux_signing_data` that stores the optional @@ -49,8 +42,7 @@ pub async fn aux_signing_data<'a>( default_signer: Option
, ) -> Result { let signing_data = - signing::aux_signing_data(context, args, owner, default_signer) - .await?; + signing::aux_signing_data(context, args, owner, default_signer).await?; if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { @@ -91,14 +83,11 @@ pub async fn submit_reveal_aux<'a>( if tx::is_reveal_pk_needed(context.client, address, args.force).await? { println!( - "Submitting a tx to reveal the public key for address {address}..." + "Submitting a tx to reveal the public key for address \ + {address}..." ); - let (mut tx, signing_data, _epoch) = tx::build_reveal_pk( - context, - &args, - &public_key, - ) - .await?; + let (mut tx, signing_data, _epoch) = + tx::build_reveal_pk(context, &args, &public_key).await?; signing::generate_test_vector(context, &tx).await?; @@ -123,7 +112,7 @@ where let mut namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); submit_reveal_aux(&mut namada, args.tx.clone(), &args.owner).await?; - + let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; signing::generate_test_vector(&mut namada, &tx).await?; @@ -174,8 +163,8 @@ where { let mut namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx, signing_data, _epoch) = tx::build_init_account(&mut namada, &args) - .await?; + let (mut tx, signing_data, _epoch) = + tx::build_init_account(&mut namada, &args).await?; signing::generate_test_vector(&mut namada, &tx).await?; @@ -507,122 +496,7 @@ where Ok(()) } -/// Shielded context file name -const FILE_NAME: &str = "shielded.dat"; -const TMP_FILE_NAME: &str = "shielded.tmp"; - -#[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] -pub struct CLIShieldedUtils { - #[borsh_skip] - context_dir: PathBuf, -} - -impl CLIShieldedUtils { - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> masp::ShieldedContext { - // Make sure that MASP parameters 
are downloaded to enable MASP - // transaction building and verification later on - let params_dir = masp::get_params_dir(); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - display_line!(IO, "MASP parameters not present, downloading..."); - masp_proofs::download_masp_parameters(None) - .expect("MASP parameters not present or downloadable"); - display_line!( - IO, - "MASP parameter download complete, resuming execution..." - ); - } - // Finally initialize a shielded context with the supplied directory - let utils = Self { context_dir }; - masp::ShieldedContext { - utils, - ..Default::default() - } - } -} - -impl Default for CLIShieldedUtils { - fn default() -> Self { - Self { - context_dir: PathBuf::from(FILE_NAME), - } - } -} - -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -impl masp::ShieldedUtils for CLIShieldedUtils { - fn local_tx_prover(&self) -> LocalTxProver { - if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - } - } - - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. 
- async fn load(self) -> std::io::Result> { - // Try to load shielded context from file - let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; - let mut bytes = Vec::new(); - ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = masp::ShieldedContext::deserialize(&mut &bytes[..])?; - // Associate the originating context directory with the - // shielded context under construction - new_ctx.utils = self; - Ok(new_ctx) - } - - /// Save this shielded context into its associated context directory - async fn save( - &self, - ctx: &masp::ShieldedContext, - ) -> std::io::Result<()> { - // TODO: use mktemp crate? - let tmp_path = self.context_dir.join(TMP_FILE_NAME); - { - // First serialize the shielded context into a temporary file. - // Inability to create this file implies a simultaneuous write is in - // progress. In this case, immediately fail. This is unproblematic - // because the data intended to be stored can always be re-fetched - // from the blockchain. - let mut ctx_file = OpenOptions::new() - .write(true) - .create_new(true) - .open(tmp_path.clone())?; - let mut bytes = Vec::new(); - ctx.serialize(&mut bytes) - .expect("cannot serialize shielded context"); - ctx_file.write_all(&bytes[..])?; - } - // Atomically update the old shielded context file with new data. - // Atomicity is required to prevent other client instances from reading - // corrupt data. - std::fs::rename(tmp_path.clone(), self.context_dir.join(FILE_NAME))?; - // Finally, remove our temporary file to allow future saving of shielded - // contexts. 
- std::fs::remove_file(tmp_path)?; - Ok(()) - } -} - -pub async fn submit_transfer< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( +pub async fn submit_transfer( client: &C, mut ctx: Context, args: args::TxTransfer, @@ -630,7 +504,7 @@ pub async fn submit_transfer< for _ in 0..2 { let mut namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - + submit_reveal_aux( &mut namada, args.tx.clone(), @@ -638,7 +512,8 @@ pub async fn submit_transfer< ) .await?; - let (mut tx, signing_data, tx_epoch) = args.clone().build(&mut namada).await?; + let (mut tx, signing_data, tx_epoch) = + args.clone().build(&mut namada).await?; signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { @@ -715,8 +590,7 @@ where let governance_parameters = rpc::query_governance_parameters(client).await; let mut namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args - .is_offline + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -768,12 +642,7 @@ where ) .await?; - tx::build_pgf_funding_proposal( - &mut namada, - &args, - proposal, - ) - .await? + tx::build_pgf_funding_proposal(&mut namada, &args, proposal).await? } else if args.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( args.proposal_data.as_ref(), @@ -803,12 +672,7 @@ where ) .await?; - tx::build_pgf_stewards_proposal( - &mut namada, - &args, - proposal, - ) - .await? + tx::build_pgf_stewards_proposal(&mut namada, &args, proposal).await? } else { let proposal = DefaultProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -836,12 +700,7 @@ where ) .await?; - tx::build_default_proposal( - &mut namada, - &args, - proposal, - ) - .await? + tx::build_default_proposal(&mut namada, &args, proposal).await? 
}; signing::generate_test_vector(&mut namada, &tx_builder).await?; @@ -866,7 +725,8 @@ where { let mut namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline + { let default_signer = Some(args.voter.clone()); let signing_data = aux_signing_data( &mut namada, @@ -874,8 +734,8 @@ where Some(args.voter.clone()), default_signer.clone(), ) - .await?; - + .await?; + let proposal_vote = ProposalVote::try_from(args.vote) .map_err(|_| error::TxError::InvalidProposalVote)?; @@ -1049,14 +909,15 @@ where let default_address = args.source.clone().unwrap_or(args.validator.clone()); submit_reveal_aux(&mut namada, args.tx.clone(), &default_address).await?; - let (mut tx, signing_data, _fee_unshield_epoch) = args.build(&mut namada).await?; + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(&mut namada).await?; signing::generate_test_vector(&mut namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data)?; - + namada.submit(tx, &args.tx).await?; } diff --git a/benches/lib.rs b/benches/lib.rs index b420d24a43..a816451477 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -69,13 +69,14 @@ use namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada::ledger::storage_api::StorageRead; +use namada::sdk::wallet::Wallet; +use namada::ledger::NamadaImpl; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; use namada::sdk::args::InputAmount; use namada::sdk::masp::{ self, ShieldedContext, ShieldedTransfer, ShieldedUtils, }; -use namada::sdk::wallet::Wallet; use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; @@ -104,7 +105,6 @@ use namada_test_utils::tx_data::TxWriteData; use rand_core::OsRng; use 
sha2::{Digest, Sha256}; use tempfile::TempDir; -use namada::ledger::NamadaImpl; pub const WASM_DIR: &str = "../wasm"; pub const TX_BOND_WASM: &str = "tx_bond.wasm"; @@ -804,13 +804,15 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let mut namada = NamadaImpl::new( - &self.shell, - &mut self.wallet, - &mut self.shielded, - ); + let mut namada = + NamadaImpl::new(&self.shell, &mut self.wallet, &mut self.shielded); let shielded = async_runtime - .block_on(ShieldedContext::::gen_shielded_transfer(&mut namada, &args)) + .block_on( + ShieldedContext::::gen_shielded_transfer( + &mut namada, + &args, + ), + ) .unwrap() .map( |ShieldedTransfer { diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index 38cfcd51cb..430537fdc2 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -19,11 +19,12 @@ use crate::ledger::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, RPC, }; +use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; +use crate::ledger::signing::aux_signing_data; +use crate::ledger::tx::prepare_tx; +use crate::ledger::{args, Namada, SigningTxData}; use crate::proto::Tx; -use crate::sdk::args; use crate::sdk::error::Error; -use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; -use crate::sdk::tx::prepare_tx; use crate::types::address::Address; use crate::types::control_flow::time::{Duration, Instant}; use crate::types::control_flow::{ @@ -38,9 +39,6 @@ use crate::types::keccak::KeccakHash; use crate::types::token::{Amount, DenominatedAmount}; use crate::types::voting_power::FractionalVotingPower; use crate::{display, display_line}; -use crate::ledger::Namada; -use crate::sdk::signing::aux_signing_data; -use crate::sdk::signing::SigningTxData; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. 
@@ -66,7 +64,7 @@ pub async fn build_bridge_pool_tx<'a>( Some(sender.clone()), default_signer, ) - .await?; + .await?; let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let DenominatedAmount { amount, .. } = validate_amount( context.client, diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index d82e09bd8a..7686635f98 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -14,6 +14,10 @@ pub mod queries; pub mod storage; pub mod vp_host_fns; +use std::ops::{Deref, DerefMut}; +use std::path::PathBuf; +use std::str::FromStr; + pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; @@ -31,7 +35,6 @@ use crate::sdk::tx::{ TX_WITHDRAW_WASM, TX_BRIDGE_POOL_WASM, TX_RESIGN_STEWARD, TX_UPDATE_STEWARD_COMMISSION, self, }; -use std::path::PathBuf; use crate::types::transaction::GasLimit; use crate::sdk::signing::{SigningTxData, self}; use crate::proto::Tx; @@ -40,13 +43,12 @@ use crate::types::token; use crate::sdk::tx::ProcessTxResponse; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; -use std::str::FromStr; -use std::ops::{Deref, DerefMut}; use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::EthAddress; /// Encapsulates a Namada session to enable splitting borrows of its parts -pub struct NamadaStruct<'a, C, U, V> where +pub struct NamadaStruct<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, @@ -61,7 +63,16 @@ pub struct NamadaStruct<'a, C, U, V> where #[async_trait::async_trait(?Send)] /// An interface for high-level interaction with the Namada SDK -pub trait Namada<'a> : DerefMut> { +pub trait Namada<'a>: + DerefMut< + Target = NamadaStruct< + 'a, + Self::Client, + Self::WalletUtils, + Self::ShieldedUtils, + >, +> +{ /// A client with async request dispatcher method type Client: 'a + crate::ledger::queries::Client + Sync; /// Captures the 
interactive parts of the wallet's functioning @@ -72,9 +83,12 @@ pub trait Namada<'a> : DerefMut Address { - self.wallet.find_address(args::NAM).expect("NAM not in wallet").clone() + self.wallet + .find_address(args::NAM) + .expect("NAM not in wallet") + .clone() } - + /// Make a tx builder using no arguments fn tx_builder(&mut self) -> args::Tx { args::Tx { @@ -205,10 +219,7 @@ pub trait Namada<'a> : DerefMut args::TxUpdateAccount { + fn new_update_account(&mut self, addr: Address) -> args::TxUpdateAccount { args::TxUpdateAccount { addr, vp_code_path: None, @@ -236,7 +247,8 @@ pub trait Namada<'a> : DerefMut : DerefMut args::Withdraw { + fn new_withdraw(&mut self, validator: Address) -> args::Withdraw { args::Withdraw { validator, source: None, @@ -348,10 +357,7 @@ pub trait Namada<'a> : DerefMut args::TxCustom { + fn new_custom(&mut self, owner: Address) -> args::TxCustom { args::TxCustom { owner, tx: self.tx_builder(), @@ -382,7 +388,8 @@ pub trait Namada<'a> : DerefMut where +pub struct NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, @@ -391,7 +398,8 @@ pub struct NamadaImpl<'a, C, U, V> where prototype: args::Tx, } -impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where +impl<'a, C, U, V> NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, @@ -407,7 +415,11 @@ impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where .expect("NAM not in wallet") .clone(); Self { - namada: NamadaStruct { client, wallet, shielded }, + namada: NamadaStruct { + client, + wallet, + shielded, + }, prototype: args::Tx { dry_run: false, dry_run_wrapper: false, @@ -436,7 +448,8 @@ impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where } } -impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> where +impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, @@ -448,7 +461,8 @@ impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, 
V> where } } -impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> where +impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, @@ -458,14 +472,15 @@ impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> where } } -impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where +impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, { type Client = C; - type WalletUtils = U; type ShieldedUtils = V; + type WalletUtils = U; /// Obtain the prototypical Tx builder fn tx_builder(&mut self) -> args::Tx { @@ -474,14 +489,19 @@ impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where } /// Allow the prototypical Tx builder to be modified -impl<'a, C, U, V> args::TxBuilder for NamadaImpl<'a, C, U, V> where +impl<'a, C, U, V> args::TxBuilder for NamadaImpl<'a, C, U, V> +where C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, { - fn tx(self, func: F) -> Self where + fn tx(self, func: F) -> Self + where F: FnOnce(args::Tx) -> args::Tx, { - Self { prototype: func(self.prototype), ..self } + Self { + prototype: func(self.prototype), + ..self + } } } diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 3c6dca8cfa..8a1d0d81b0 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -4,6 +4,9 @@ use std::collections::HashMap; use std::path::PathBuf; use std::time::Duration as StdDuration; +use namada_core::ledger::governance::cli::onchain::{ + DefaultProposal, PgfFundingProposal, PgfStewardProposal, +}; use namada_core::types::chain::ChainId; use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::EthAddress; @@ -12,6 +15,8 @@ use serde::{Deserialize, Serialize}; use zeroize::Zeroizing; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::ledger::eth_bridge::bridge_pool; +use crate::ledger::Namada; use 
crate::types::address::Address; use crate::types::keccak::KeccakHash; use crate::types::key::{common, SchemeType}; @@ -19,13 +24,8 @@ use crate::types::masp::MaspValue; use crate::types::storage::Epoch; use crate::types::transaction::GasLimit; use crate::types::{storage, token}; -use crate::ledger::Namada; use crate::sdk::signing::SigningTxData; use crate::sdk::{tx, rpc}; -use crate::ledger::eth_bridge::bridge_pool; -use namada_core::ledger::governance::cli::onchain::{ - DefaultProposal, PgfFundingProposal, PgfStewardProposal, -}; /// The Namada token pub const NAM: &str = "NAM"; @@ -135,24 +135,42 @@ pub struct TxCustom { } impl TxBuilder for TxCustom { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - TxCustom { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxCustom { + tx: func(self.tx), + ..self + } } } impl TxCustom { /// Path to the tx WASM code file pub fn code_path(self, code_path: PathBuf) -> Self { - Self { code_path: Some(code_path), ..self } + Self { + code_path: Some(code_path), + ..self + } } + /// Path to the data file pub fn data_path(self, data_path: C::Data) -> Self { - Self { data_path: Some(data_path), ..self } + Self { + data_path: Some(data_path), + ..self + } } + /// Path to the serialized transaction pub fn serialized_tx(self, serialized_tx: C::Data) -> Self { - Self { serialized_tx: Some(serialized_tx), ..self } + Self { + serialized_tx: Some(serialized_tx), + ..self + } } + /// The address that correspond to the signatures/signing-keys pub fn owner(self, owner: C::Address) -> Self { Self { owner, ..self } @@ -161,9 +179,14 @@ impl TxCustom { impl TxCustom { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + 
Option, + )> { tx::build_custom(context, self).await } } @@ -182,7 +205,7 @@ pub enum InputAmount { impl std::str::FromStr for InputAmount { type Err = ::Err; - + fn from_str(s: &str) -> Result { token::DenominatedAmount::from_str(s).map(InputAmount::Unvalidated) } @@ -208,8 +231,14 @@ pub struct TxTransfer { } impl TxBuilder for TxTransfer { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - TxTransfer { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxTransfer { + tx: func(self.tx), + ..self + } } } @@ -218,33 +247,49 @@ impl TxTransfer { pub fn source(self, source: C::TransferSource) -> Self { Self { source, ..self } } + /// Transfer target address pub fn receiver(self, target: C::TransferTarget) -> Self { Self { target, ..self } } + /// Transferred token address pub fn token(self, token: C::Address) -> Self { Self { token, ..self } } + /// Transferred token amount pub fn amount(self, amount: InputAmount) -> Self { Self { amount, ..self } } + /// Native token address pub fn native_token(self, native_token: C::NativeAddress) -> Self { - Self { native_token, ..self } + Self { + native_token, + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl TxTransfer { /// Build a transaction from this builder - pub async fn build<'a>(&mut self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &mut self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_transfer(context, self).await } } @@ -277,8 +322,14 @@ pub struct TxIbcTransfer { } impl TxBuilder for TxIbcTransfer { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - TxIbcTransfer { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: 
FnOnce(Tx) -> Tx, + { + TxIbcTransfer { + tx: func(self.tx), + ..self + } } } @@ -287,54 +338,79 @@ impl TxIbcTransfer { pub fn source(self, source: C::Address) -> Self { Self { source, ..self } } + /// Transfer target address pub fn receiver(self, receiver: String) -> Self { Self { receiver, ..self } } + /// Transferred token address pub fn token(self, token: C::Address) -> Self { Self { token, ..self } } + /// Transferred token amount pub fn amount(self, amount: InputAmount) -> Self { Self { amount, ..self } } + /// Port ID pub fn port_id(self, port_id: PortId) -> Self { Self { port_id, ..self } } + /// Channel ID pub fn channel_id(self, channel_id: ChannelId) -> Self { Self { channel_id, ..self } } + /// Timeout height of the destination chain pub fn timeout_height(self, timeout_height: u64) -> Self { - Self { timeout_height: Some(timeout_height), ..self } + Self { + timeout_height: Some(timeout_height), + ..self + } } + /// Timeout timestamp offset pub fn timeout_sec_offset(self, timeout_sec_offset: u64) -> Self { - Self { timeout_sec_offset: Some(timeout_sec_offset), ..self } + Self { + timeout_sec_offset: Some(timeout_sec_offset), + ..self + } } + /// Memo pub fn memo(self, memo: String) -> Self { - Self { memo: Some(memo), ..self } + Self { + memo: Some(memo), + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl TxIbcTransfer { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_ibc_transfer(context, self).await } } - /// Transaction to initialize create a new proposal #[derive(Clone, Debug)] pub struct InitProposal { @@ -355,46 
+431,78 @@ pub struct InitProposal { } impl TxBuilder for InitProposal { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - InitProposal { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + InitProposal { + tx: func(self.tx), + ..self + } } } impl InitProposal { /// The proposal data pub fn proposal_data(self, proposal_data: C::Data) -> Self { - Self { proposal_data, ..self } + Self { + proposal_data, + ..self + } } + /// Native token address pub fn native_token(self, native_token: C::NativeAddress) -> Self { - Self { native_token, ..self } + Self { + native_token, + ..self + } } + /// Flag if proposal should be run offline pub fn is_offline(self, is_offline: bool) -> Self { Self { is_offline, ..self } } + /// Flag if proposal is of type Pgf stewards pub fn is_pgf_stewards(self, is_pgf_stewards: bool) -> Self { - Self { is_pgf_stewards, ..self } + Self { + is_pgf_stewards, + ..self + } } + /// Flag if proposal is of type Pgf funding pub fn is_pgf_funding(self, is_pgf_funding: bool) -> Self { - Self { is_pgf_funding, ..self } + Self { + is_pgf_funding, + ..self + } } + /// Path to the tx WASM file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl InitProposal { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { let current_epoch = rpc::query_epoch(context.client).await?; - let governance_parameters = rpc::query_governance_parameters(context.client).await; - + let governance_parameters = + rpc::query_governance_parameters(context.client).await; + if self.is_pgf_funding { let proposal = PgfFundingProposal::try_from(self.proposal_data.as_ref()) 
@@ -405,13 +513,8 @@ impl InitProposal { })? .validate(&governance_parameters, current_epoch, self.tx.force) .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; - - tx::build_pgf_funding_proposal( - context, - self, - proposal, - ) - .await + + tx::build_pgf_funding_proposal(context, self, proposal).await } else if self.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( self.proposal_data.as_ref(), @@ -424,7 +527,7 @@ impl InitProposal { context.wallet.find_address(NAM).expect("NAM not in wallet"), &proposal.proposal.author, ) - .await?; + .await?; let proposal = proposal .validate( &governance_parameters, @@ -432,14 +535,11 @@ impl InitProposal { author_balance, self.tx.force, ) - .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; - - tx::build_pgf_stewards_proposal( - context, - self, - proposal, - ) - .await + .map_err(|e| { + crate::sdk::error::TxError::InvalidProposal(e.to_string()) + })?; + + tx::build_pgf_stewards_proposal(context, self, proposal).await } else { let proposal = DefaultProposal::try_from(self.proposal_data.as_ref()) .map_err(|e| { @@ -450,7 +550,7 @@ impl InitProposal { context.wallet.find_address(NAM).expect("NAM not in wallet"), &proposal.proposal.author, ) - .await?; + .await?; let proposal = proposal .validate( &governance_parameters, @@ -458,13 +558,10 @@ impl InitProposal { author_balance, self.tx.force, ) - .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; - tx::build_default_proposal( - context, - self, - proposal, - ) - .await + .map_err(|e| { + crate::sdk::error::TxError::InvalidProposal(e.to_string()) + })?; + tx::build_default_proposal(context, self, proposal).await } } } @@ -489,43 +586,68 @@ pub struct VoteProposal { } impl TxBuilder for VoteProposal { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - VoteProposal { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + VoteProposal { + tx: 
func(self.tx), + ..self + } } } impl VoteProposal { /// Proposal id pub fn proposal_id(self, proposal_id: u64) -> Self { - Self { proposal_id: Some(proposal_id), ..self } + Self { + proposal_id: Some(proposal_id), + ..self + } } + /// The vote pub fn vote(self, vote: String) -> Self { Self { vote, ..self } } + /// The address of the voter pub fn voter(self, voter: C::Address) -> Self { Self { voter, ..self } } + /// Flag if proposal vote should be run offline pub fn is_offline(self, is_offline: bool) -> Self { Self { is_offline, ..self } } + /// The proposal file path pub fn proposal_data(self, proposal_data: C::Data) -> Self { - Self { proposal_data: Some(proposal_data), ..self } + Self { + proposal_data: Some(proposal_data), + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl VoteProposal { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { let current_epoch = rpc::query_epoch(context.client).await?; tx::build_vote_proposal(context, self, current_epoch).await } @@ -595,39 +717,66 @@ pub struct TxUpdateAccount { } impl TxBuilder for TxUpdateAccount { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - TxUpdateAccount { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxUpdateAccount { + tx: func(self.tx), + ..self + } } } impl TxUpdateAccount { /// Path to the VP WASM code file pub fn vp_code_path(self, vp_code_path: PathBuf) -> Self { - Self { vp_code_path: Some(vp_code_path), ..self } + Self { + vp_code_path: Some(vp_code_path), + ..self + } } + /// Path to the TX WASM code file pub fn 
tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } + /// Address of the account whose VP is to be updated pub fn addr(self, addr: C::Address) -> Self { Self { addr, ..self } } + /// Public keys pub fn public_keys(self, public_keys: Vec) -> Self { - Self { public_keys, ..self } + Self { + public_keys, + ..self + } } + /// The account threshold pub fn threshold(self, threshold: u8) -> Self { - Self { threshold: Some(threshold), ..self } + Self { + threshold: Some(threshold), + ..self + } } } impl TxUpdateAccount { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_update_account(context, self).await } } @@ -651,8 +800,14 @@ pub struct Bond { } impl TxBuilder for Bond { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - Bond { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Bond { + tx: func(self.tx), + ..self + } } } @@ -661,30 +816,48 @@ impl Bond { pub fn validator(self, validator: C::Address) -> Self { Self { validator, ..self } } + /// Amount of tokens to stake in a bond pub fn amount(self, amount: token::Amount) -> Self { Self { amount, ..self } } + /// Source address for delegations. For self-bonds, the validator is /// also the source. 
pub fn source(self, source: C::Address) -> Self { - Self { source: Some(source), ..self } + Self { + source: Some(source), + ..self + } } + /// Native token address pub fn native_token(self, native_token: C::NativeAddress) -> Self { - Self { native_token, ..self } + Self { + native_token, + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl Bond { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_bond(context, self).await } } @@ -707,16 +880,28 @@ pub struct Unbond { impl Unbond { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option, Option<(Epoch, token::Amount)>)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + Option<(Epoch, token::Amount)>, + )> { tx::build_unbond(context, self).await } } impl TxBuilder for Unbond { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - Unbond { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Unbond { + tx: func(self.tx), + ..self + } } } @@ -725,18 +910,27 @@ impl Unbond { pub fn validator(self, validator: C::Address) -> Self { Self { validator, ..self } } + /// Amount of tokens to unbond from a bond pub fn amount(self, amount: token::Amount) -> Self { Self { amount, ..self } } + /// Source address for unbonding from delegations. 
For unbonding from /// self-bonds, the validator is also the source pub fn source(self, source: C::Address) -> Self { - Self { source: Some(source), ..self } + Self { + source: Some(source), + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } @@ -750,8 +944,14 @@ pub struct RevealPk { } impl TxBuilder for RevealPk { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - RevealPk { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + RevealPk { + tx: func(self.tx), + ..self + } } } @@ -764,14 +964,15 @@ impl RevealPk { impl RevealPk { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { - tx::build_reveal_pk( - context, - &self.tx, - &self.public_key, - ).await + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { + tx::build_reveal_pk(context, &self.tx, &self.public_key).await } } @@ -813,8 +1014,14 @@ pub struct Withdraw { } impl TxBuilder for Withdraw { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - Withdraw { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Withdraw { + tx: func(self.tx), + ..self + } } } @@ -823,22 +1030,35 @@ impl Withdraw { pub fn validator(self, validator: C::Address) -> Self { Self { validator, ..self } } + /// Source address for withdrawing from delegations. 
For withdrawing /// from self-bonds, the validator is also the source pub fn source(self, source: C::Address) -> Self { - Self { source: Some(source), ..self } + Self { + source: Some(source), + ..self + } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl Withdraw { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_withdraw(context, self).await } } @@ -934,32 +1154,47 @@ pub struct CommissionRateChange { } impl TxBuilder for CommissionRateChange { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - CommissionRateChange { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + CommissionRateChange { + tx: func(self.tx), + ..self + } } } - impl CommissionRateChange { /// Validator address (should be self) pub fn validator(self, validator: C::Address) -> Self { Self { validator, ..self } } + /// Value to which the tx changes the commission rate pub fn rate(self, rate: Dec) -> Self { Self { rate, ..self } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl CommissionRateChange { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_validator_commission_change(context, self).await } } @@ 
-978,8 +1213,14 @@ pub struct UpdateStewardCommission { } impl TxBuilder for UpdateStewardCommission { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - UpdateStewardCommission { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + UpdateStewardCommission { + tx: func(self.tx), + ..self + } } } @@ -988,21 +1229,31 @@ impl UpdateStewardCommission { pub fn steward(self, steward: C::Address) -> Self { Self { steward, ..self } } + /// Value to which the tx changes the commission rate pub fn commission(self, commission: C::Data) -> Self { Self { commission, ..self } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl UpdateStewardCommission { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_update_steward_commission(context, self).await } } @@ -1019,8 +1270,14 @@ pub struct ResignSteward { } impl TxBuilder for ResignSteward { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - ResignSteward { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + ResignSteward { + tx: func(self.tx), + ..self + } } } @@ -1029,17 +1286,26 @@ impl ResignSteward { pub fn steward(self, steward: C::Address) -> Self { Self { steward, ..self } } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl ResignSteward { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - 
crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_resign_steward(context, self).await } } @@ -1056,8 +1322,14 @@ pub struct TxUnjailValidator { } impl TxBuilder for TxUnjailValidator { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - TxUnjailValidator { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxUnjailValidator { + tx: func(self.tx), + ..self + } } } @@ -1066,17 +1338,26 @@ impl TxUnjailValidator { pub fn validator(self, validator: C::Address) -> Self { Self { validator, ..self } } + /// Path to the TX WASM code file pub fn tc_code_path(self, tx_code_path: PathBuf) -> Self { - Self { tx_code_path, ..self } + Self { + tx_code_path, + ..self + } } } impl TxUnjailValidator { /// Build a transaction from this builder - pub async fn build<'a>(&self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + &self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { tx::build_unjail_validator(context, self).await } } @@ -1192,16 +1473,21 @@ pub struct Tx { } /// Builder functions for Tx -pub trait TxBuilder : Sized { +pub trait TxBuilder: Sized { /// Apply the given function to the Tx inside self - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx; + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx; /// Simulate applying the transaction fn dry_run(self, dry_run: bool) -> Self { self.tx(|x| Tx { dry_run, ..x }) } /// Simulate applying both the wrapper and inner transactions fn dry_run_wrapper(self, dry_run_wrapper: bool) -> Self { - self.tx(|x| Tx { dry_run_wrapper, ..x }) + self.tx(|x| Tx { + dry_run_wrapper, + ..x + }) } /// Dump the transaction bytes to 
file fn dump_tx(self, dump_tx: bool) -> Self { @@ -1209,7 +1495,10 @@ pub trait TxBuilder : Sized { } /// The output directory path to where serialize the data fn output_folder(self, output_folder: PathBuf) -> Self { - self.tx(|x| Tx { output_folder: Some(output_folder), ..x }) + self.tx(|x| Tx { + output_folder: Some(output_folder), + ..x + }) } /// Submit the transaction even if it doesn't pass client checks fn force(self, force: bool) -> Self { @@ -1217,29 +1506,50 @@ pub trait TxBuilder : Sized { } /// Do not wait for the transaction to be added to the blockchain fn broadcast_only(self, broadcast_only: bool) -> Self { - self.tx(|x| Tx { broadcast_only, ..x }) + self.tx(|x| Tx { + broadcast_only, + ..x + }) } /// The address of the ledger node as host:port fn ledger_address(self, ledger_address: C::TendermintAddress) -> Self { - self.tx(|x| Tx { ledger_address, ..x }) + self.tx(|x| Tx { + ledger_address, + ..x + }) } /// If any new account is initialized by the tx, use the given alias to /// save it in the wallet. - fn initialized_account_alias(self, initialized_account_alias: String) -> Self { - self.tx(|x| Tx { initialized_account_alias: Some(initialized_account_alias), ..x }) + fn initialized_account_alias( + self, + initialized_account_alias: String, + ) -> Self { + self.tx(|x| Tx { + initialized_account_alias: Some(initialized_account_alias), + ..x + }) } /// Whether to force overwrite the above alias, if it is provided, in the /// wallet. 
fn wallet_alias_force(self, wallet_alias_force: bool) -> Self { - self.tx(|x| Tx { wallet_alias_force, ..x }) + self.tx(|x| Tx { + wallet_alias_force, + ..x + }) } /// The amount being payed (for gas unit) to include the transaction fn fee_amount(self, fee_amount: InputAmount) -> Self { - self.tx(|x| Tx { fee_amount: Some(fee_amount), ..x }) + self.tx(|x| Tx { + fee_amount: Some(fee_amount), + ..x + }) } /// The fee payer signing key fn wrapper_fee_payer(self, wrapper_fee_payer: C::Keypair) -> Self { - self.tx(|x| Tx { wrapper_fee_payer: Some(wrapper_fee_payer), ..x }) + self.tx(|x| Tx { + wrapper_fee_payer: Some(wrapper_fee_payer), + ..x + }) } /// The token in which the fee is being paid fn fee_token(self, fee_token: C::Address) -> Self { @@ -1247,7 +1557,10 @@ pub trait TxBuilder : Sized { } /// The optional spending key for fee unshielding fn fee_unshield(self, fee_unshield: C::TransferSource) -> Self { - self.tx(|x| Tx { fee_unshield: Some(fee_unshield), ..x }) + self.tx(|x| Tx { + fee_unshield: Some(fee_unshield), + ..x + }) } /// The max amount of gas used to process tx fn gas_limit(self, gas_limit: GasLimit) -> Self { @@ -1255,16 +1568,25 @@ pub trait TxBuilder : Sized { } /// The optional expiration of the transaction fn expiration(self, expiration: DateTimeUtc) -> Self { - self.tx(|x| Tx { expiration: Some(expiration), ..x }) + self.tx(|x| Tx { + expiration: Some(expiration), + ..x + }) } /// Generate an ephimeral signing key to be used only once to sign a /// wrapper tx fn disposable_signing_key(self, disposable_signing_key: bool) -> Self { - self.tx(|x| Tx { disposable_signing_key, ..x }) + self.tx(|x| Tx { + disposable_signing_key, + ..x + }) } /// The chain id for which the transaction is intended fn chain_id(self, chain_id: ChainId) -> Self { - self.tx(|x| Tx { chain_id: Some(chain_id), ..x }) + self.tx(|x| Tx { + chain_id: Some(chain_id), + ..x + }) } /// Sign the tx with the key for the given alias from your wallet fn signing_keys(self, 
signing_keys: Vec) -> Self { @@ -1276,20 +1598,32 @@ pub trait TxBuilder : Sized { } /// Path to the TX WASM code file to reveal PK fn tx_reveal_code_path(self, tx_reveal_code_path: PathBuf) -> Self { - self.tx(|x| Tx { tx_reveal_code_path, ..x }) + self.tx(|x| Tx { + tx_reveal_code_path, + ..x + }) } /// Sign the tx with the public key for the given alias from your wallet fn verification_key(self, verification_key: C::PublicKey) -> Self { - self.tx(|x| Tx { verification_key: Some(verification_key), ..x }) + self.tx(|x| Tx { + verification_key: Some(verification_key), + ..x + }) } /// Password to decrypt key fn password(self, password: Zeroizing) -> Self { - self.tx(|x| Tx { password: Some(password), ..x }) + self.tx(|x| Tx { + password: Some(password), + ..x + }) } } impl TxBuilder for Tx { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { func(self) } } @@ -1473,8 +1807,14 @@ pub struct EthereumBridgePool { } impl TxBuilder for EthereumBridgePool { - fn tx(self, func: F) -> Self where F: FnOnce(Tx) -> Tx { - EthereumBridgePool { tx: func(self.tx), ..self } + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + EthereumBridgePool { + tx: func(self.tx), + ..self + } } } @@ -1486,36 +1826,47 @@ impl EthereumBridgePool { pub fn nut(self, nut: bool) -> Self { Self { nut, ..self } } + /// The type of token pub fn asset(self, asset: EthAddress) -> Self { Self { asset, ..self } } + /// The recipient address pub fn recipient(self, recipient: EthAddress) -> Self { Self { recipient, ..self } } + /// The sender of the transfer pub fn sender(self, sender: C::Address) -> Self { Self { sender, ..self } } + /// The amount to be transferred pub fn amount(self, amount: InputAmount) -> Self { Self { amount, ..self } } + /// The amount of gas fees pub fn fee_amount(self, fee_amount: InputAmount) -> Self { Self { fee_amount, ..self } } + /// The account of fee payer. 
/// /// If unset, it is the same as the sender. pub fn fee_payer(self, fee_payer: C::Address) -> Self { - Self { fee_payer: Some(fee_payer), ..self } + Self { + fee_payer: Some(fee_payer), + ..self + } } + /// The token in which the gas is being paid pub fn fee_token(self, fee_token: C::Address) -> Self { Self { fee_token, ..self } } + /// Path to the tx WASM code file pub fn code_path(self, code_path: PathBuf) -> Self { Self { code_path, ..self } @@ -1524,9 +1875,14 @@ impl EthereumBridgePool { impl EthereumBridgePool { /// Build a transaction from this builder - pub async fn build<'a>(self, context: &mut impl Namada<'a>) -> - crate::sdk::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + pub async fn build<'a>( + self, + context: &mut impl Namada<'a>, + ) -> crate::sdk::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + )> { bridge_pool::build_bridge_pool_tx(context, self).await } } diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 6b44dd018d..1cd9ae7e5b 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -58,12 +58,13 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; -use crate::proto::Tx; use crate::sdk::args::InputAmount; -use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; -use crate::sdk::queries::Client; +use crate::ledger::queries::Client; use crate::sdk::rpc::{query_conversion, query_storage_value}; use crate::sdk::tx::decode_component; +use crate::ledger::Namada; +use crate::proto::Tx; +use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; use crate::sdk::{args, rpc}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; @@ -77,7 +78,6 @@ use crate::types::token::{ }; use crate::types::transaction::{EllipticCurve, PairingEngine, WrapperTx}; use crate::{display_line, edisplay_line}; -use crate::ledger::Namada; /// Env var to point to a dir with MASP parameters. 
When not specified, /// the default OS specific path is used. @@ -1509,7 +1509,10 @@ impl ShieldedContext { // Load the current shielded context given the spending key we possess let _ = context.shielded.load().await; let context = &mut **context; - context.shielded.fetch(context.client, &spending_keys, &[]).await?; + context + .shielded + .fetch(context.client, &spending_keys, &[]) + .await?; // Save the update state so that future fetches can be short-circuited let _ = context.shielded.save().await; // Determine epoch in which to submit potential shielded transaction @@ -2110,3 +2113,128 @@ mod tests { super::load_pvks(); } } + +#[cfg(feature = "std")] +/// Implementation of MASP functionality depending on a standard filesystem +pub mod fs { + use std::fs::{File, OpenOptions}; + use std::io::{Read, Write}; + use async_trait::async_trait; + + use super::*; + + /// Shielded context file name + const FILE_NAME: &str = "shielded.dat"; + const TMP_FILE_NAME: &str = "shielded.tmp"; + + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + /// An implementation of ShieldedUtils for standard filesystems + pub struct FsShieldedUtils { + #[borsh_skip] + context_dir: PathBuf, + } + + impl FsShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new(context_dir: PathBuf) -> ShieldedContext { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = get_params_dir(); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + if !(spend_path.exists() + && convert_path.exists() + && output_path.exists()) + { + println!("MASP parameters not present, downloading..."); + masp_proofs::download_masp_parameters(None) + .expect("MASP parameters not present or downloadable"); + println!( + "MASP parameter download 
complete, resuming execution..." + ); + } + // Finally initialize a shielded context with the supplied directory + let utils = Self { context_dir }; + ShieldedContext { + utils, + ..Default::default() + } + } + } + + impl Default for FsShieldedUtils { + fn default() -> Self { + Self { + context_dir: PathBuf::from(FILE_NAME), + } + } + } + + #[async_trait(?Send)] + impl ShieldedUtils for FsShieldedUtils { + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") + } + } + + /// Try to load the last saved shielded context from the given context + /// directory. If this fails, then leave the current context unchanged. + async fn load(self) -> std::io::Result> { + // Try to load shielded context from file + let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; + let mut bytes = Vec::new(); + ctx_file.read_to_end(&mut bytes)?; + let mut new_ctx = ShieldedContext::deserialize(&mut &bytes[..])?; + // Associate the originating context directory with the + // shielded context under construction + new_ctx.utils = self; + Ok(new_ctx) + } + + /// Save this shielded context into its associated context directory + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()> { + // TODO: use mktemp crate? + let tmp_path = self.context_dir.join(TMP_FILE_NAME); + { + // First serialize the shielded context into a temporary file. + // Inability to create this file implies a simultaneuous write + // is in progress. In this case, immediately + // fail. 
This is unproblematic because the data + // intended to be stored can always be re-fetched + // from the blockchain. + let mut ctx_file = OpenOptions::new() + .write(true) + .create_new(true) + .open(tmp_path.clone())?; + let mut bytes = Vec::new(); + ctx.serialize(&mut bytes) + .expect("cannot serialize shielded context"); + ctx_file.write_all(&bytes[..])?; + } + // Atomically update the old shielded context file with new data. + // Atomicity is required to prevent other client instances from + // reading corrupt data. + std::fs::rename( + tmp_path.clone(), + self.context_dir.join(FILE_NAME), + )?; + // Finally, remove our temporary file to allow future saving of + // shielded contexts. + std::fs::remove_file(tmp_path)?; + Ok(()) + } + } +} diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 3afb990422..aa2ac368f6 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -44,6 +44,7 @@ pub use crate::sdk::wallet::store::AddressVpType; use crate::sdk::wallet::{Wallet, WalletUtils}; use crate::sdk::{args, rpc}; use crate::types::io::*; +use crate::sdk::args::SdkTypes; use crate::types::key::*; use crate::types::masp::{ExtendedViewingKey, PaymentAddress}; use crate::types::storage::Epoch; @@ -53,9 +54,8 @@ use crate::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use crate::types::transaction::pos::InitValidator; -use crate::types::transaction::Fee; use crate::ledger::Namada; -use crate::sdk::args::SdkTypes; +use crate::types::transaction::Fee; #[cfg(feature = "std")] /// Env. 
var specifying where to store signing test vectors @@ -174,8 +174,13 @@ pub async fn tx_signers<'a>( Some(signer) if signer == masp() => Ok(vec![masp_tx_key().ref_to()]), Some(signer) => Ok(vec![ - find_pk(context.client, context.wallet, &signer, args.password.clone()) - .await?, + find_pk( + context.client, + context.wallet, + &signer, + args.password.clone(), + ) + .await?, ]), None => other_err( "All transactions must be signed; please either specify the key \ @@ -348,10 +353,14 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( }; let fee_amount = match args.fee_amount { Some(amount) => { - let validated_fee_amount = - validate_amount(context.client, amount, &args.fee_token, args.force) - .await - .expect("Expected to be able to validate fee"); + let validated_fee_amount = validate_amount( + context.client, + amount, + &args.fee_token, + args.force, + ) + .await + .expect("Expected to be able to validate fee"); let amount = Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); @@ -385,9 +394,12 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( let balance_key = token::balance_key(&args.fee_token, &fee_payer_address); - rpc::query_storage_value::<_, token::Amount>(context.client, &balance_key) - .await - .unwrap_or_default() + rpc::query_storage_value::<_, token::Amount>( + context.client, + &balance_key, + ) + .await + .unwrap_or_default() } }; @@ -832,20 +844,25 @@ pub async fn to_ledger_vector<'a>( query_wasm_code_hash(context.client, TX_INIT_PROPOSAL).await?; let vote_proposal_hash = query_wasm_code_hash(context.client, TX_VOTE_PROPOSAL).await?; - let reveal_pk_hash = query_wasm_code_hash(context.client, TX_REVEAL_PK).await?; + let reveal_pk_hash = + query_wasm_code_hash(context.client, TX_REVEAL_PK).await?; let update_account_hash = query_wasm_code_hash(context.client, TX_UPDATE_ACCOUNT_WASM).await?; - let transfer_hash = query_wasm_code_hash(context.client, TX_TRANSFER_WASM).await?; + let transfer_hash = + query_wasm_code_hash(context.client, 
TX_TRANSFER_WASM).await?; let ibc_hash = query_wasm_code_hash(context.client, TX_IBC_WASM).await?; let bond_hash = query_wasm_code_hash(context.client, TX_BOND_WASM).await?; - let unbond_hash = query_wasm_code_hash(context.client, TX_UNBOND_WASM).await?; - let withdraw_hash = query_wasm_code_hash(context.client, TX_WITHDRAW_WASM).await?; + let unbond_hash = + query_wasm_code_hash(context.client, TX_UNBOND_WASM).await?; + let withdraw_hash = + query_wasm_code_hash(context.client, TX_WITHDRAW_WASM).await?; let change_commission_hash = query_wasm_code_hash(context.client, TX_CHANGE_COMMISSION_WASM).await?; let user_hash = query_wasm_code_hash(context.client, VP_USER_WASM).await?; // To facilitate lookups of human-readable token names - let tokens: HashMap = context.wallet + let tokens: HashMap = context + .wallet .get_addresses_with_vp_type(AddressVpType::Token) .into_iter() .map(|addr| { diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 4178986caf..e7a86a5d3b 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -50,6 +50,7 @@ use crate::sdk::rpc::{ TxResponse, query_wasm_code_hash }; use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::ledger::Namada; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; @@ -66,7 +67,6 @@ use crate::types::transaction::account::{InitAccount, UpdateAccount}; use crate::types::transaction::{pos, TxType}; use crate::types::{storage, token}; use crate::{display_line, edisplay_line, vm}; -use crate::ledger::Namada; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; @@ -103,7 +103,7 @@ pub const TX_CHANGE_COMMISSION_WASM: &str = pub const TX_RESIGN_STEWARD: &str = "tx_resign_steward.wasm"; /// Update steward commission WASM path pub const TX_UPDATE_STEWARD_COMMISSION: &str = - "tx_update_steward_commission.wasm"; + "tx_update_steward_commission.wasm"; /// 
Default timeout in seconds for requests to the `/accepted` /// and `/applied` ABCI query endpoints. @@ -273,8 +273,7 @@ pub async fn build_reveal_pk<'a>( public_key: &common::PublicKey, ) -> Result<(Tx, SigningTxData, Option)> { let signing_data = - signing::aux_signing_data(context, args, None, None) - .await?; + signing::aux_signing_data(context, args, None, None).await?; build( context, @@ -285,7 +284,8 @@ pub async fn build_reveal_pk<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Broadcast a transaction to be included in the blockchain and checks that @@ -522,8 +522,8 @@ pub async fn build_validator_commission_change<'a>( Some(validator.clone()), default_signer, ) - .await?; - + .await?; + let epoch = rpc::query_epoch(context.client).await?; let params: PosParams = rpc::get_pos_params(context.client).await?; @@ -598,7 +598,8 @@ pub async fn build_validator_commission_change<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to update a steward commission @@ -618,8 +619,8 @@ pub async fn build_update_steward_commission<'a>( Some(steward.clone()), default_signer, ) - .await?; - + .await?; + if !rpc::is_steward(context.client, steward).await && !tx_args.force { edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); @@ -652,7 +653,8 @@ pub async fn build_update_steward_commission<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to resign as a steward @@ -671,8 +673,8 @@ pub async fn build_resign_steward<'a>( Some(steward.clone()), default_signer, ) - .await?; - + .await?; + if !rpc::is_steward(context.client, steward).await && 
!tx_args.force { edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); @@ -687,7 +689,8 @@ pub async fn build_resign_steward<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit transaction to unjail a jailed validator @@ -706,8 +709,8 @@ pub async fn build_unjail_validator<'a>( Some(validator.clone()), default_signer, ) - .await?; - + .await?; + if !rpc::is_validator(context.client, validator).await? { edisplay_line!( StdIo, @@ -725,14 +728,17 @@ pub async fn build_unjail_validator<'a>( let current_epoch = rpc::query_epoch(context.client).await?; let pipeline_epoch = current_epoch + params.pipeline_len; - let validator_state_at_pipeline = - rpc::get_validator_state(context.client, validator, Some(pipeline_epoch)) - .await? - .ok_or_else(|| { - Error::from(TxError::Other( - "Validator state should be defined.".to_string(), - )) - })?; + let validator_state_at_pipeline = rpc::get_validator_state( + context.client, + validator, + Some(pipeline_epoch), + ) + .await? 
+ .ok_or_else(|| { + Error::from(TxError::Other( + "Validator state should be defined.".to_string(), + )) + })?; if validator_state_at_pipeline != ValidatorState::Jailed { edisplay_line!( StdIo, @@ -749,9 +755,11 @@ pub async fn build_unjail_validator<'a>( let last_slash_epoch_key = crate::ledger::pos::validator_last_slash_key(validator); - let last_slash_epoch = - rpc::query_storage_value::<_, Epoch>(context.client, &last_slash_epoch_key) - .await; + let last_slash_epoch = rpc::query_storage_value::<_, Epoch>( + context.client, + &last_slash_epoch_key, + ) + .await; match last_slash_epoch { Ok(last_slash_epoch) => { let eligible_epoch = @@ -793,7 +801,8 @@ pub async fn build_unjail_validator<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit transaction to withdraw an unbond @@ -814,13 +823,16 @@ pub async fn build_withdraw<'a>( Some(default_address), default_signer, ) - .await?; - + .await?; + let epoch = rpc::query_epoch(context.client).await?; - let validator = - known_validator_or_err(validator.clone(), tx_args.force, context.client) - .await?; + let validator = known_validator_or_err( + validator.clone(), + tx_args.force, + context.client, + ) + .await?; let source = source.clone(); @@ -841,7 +853,8 @@ pub async fn build_withdraw<'a>( epoch {}.", epoch ); - rpc::query_and_print_unbonds(context.client, &bond_source, &validator).await?; + rpc::query_and_print_unbonds(context.client, &bond_source, &validator) + .await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } @@ -865,7 +878,8 @@ pub async fn build_withdraw<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to unbond @@ -878,7 +892,12 @@ pub async fn build_unbond<'a>( source, tx_code_path, }: &args::Unbond, -) -> Result<(Tx, SigningTxData, 
Option, Option<(Epoch, token::Amount)>)> { +) -> Result<( + Tx, + SigningTxData, + Option, + Option<(Epoch, token::Amount)>, +)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); let signing_data = signing::aux_signing_data( @@ -887,15 +906,19 @@ pub async fn build_unbond<'a>( Some(default_address), default_signer, ) - .await?; - + .await?; + let source = source.clone(); // Check the source's current bond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); if !tx_args.force { - known_validator_or_err(validator.clone(), tx_args.force, context.client) - .await?; + known_validator_or_err( + validator.clone(), + tx_args.force, + context.client, + ) + .await?; let bond_amount = rpc::query_bond(context.client, &bond_source, validator, None).await?; @@ -926,9 +949,12 @@ pub async fn build_unbond<'a>( } // Query the unbonds before submitting the tx - let unbonds = - rpc::query_unbond_with_slashing(context.client, &bond_source, validator) - .await?; + let unbonds = rpc::query_unbond_with_slashing( + context.client, + &bond_source, + validator, + ) + .await?; let mut withdrawable = BTreeMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { let to_withdraw = withdrawable.entry(withdraw_epoch).or_default(); @@ -1043,17 +1069,22 @@ pub async fn build_bond<'a>( Some(default_address.clone()), default_signer, ) - .await?; - - let validator = - known_validator_or_err(validator.clone(), tx_args.force, context.client) - .await?; + .await?; + + let validator = known_validator_or_err( + validator.clone(), + tx_args.force, + context.client, + ) + .await?; // Check that the source address exists on chain let source = match source.clone() { - Some(source) => source_exists_or_err(source, tx_args.force, context.client) - .await - .map(Some), + Some(source) => { + source_exists_or_err(source, tx_args.force, context.client) + .await + .map(Some) + } None => 
Ok(source.clone()), }?; // Check bond's source (source for delegation or validator for self-bonds) @@ -1092,7 +1123,8 @@ pub async fn build_bond<'a>( &signing_data.fee_payer, tx_source_balance, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a default proposal governance @@ -1116,8 +1148,8 @@ pub async fn build_default_proposal<'a>( Some(proposal.proposal.author.clone()), default_signer, ) - .await?; - + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1144,7 +1176,8 @@ pub async fn build_default_proposal<'a>( &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a proposal vote @@ -1168,8 +1201,8 @@ pub async fn build_vote_proposal<'a>( Some(voter.clone()), default_signer.clone(), ) - .await?; - + .await?; + let proposal_vote = ProposalVote::try_from(vote.clone()) .map_err(|_| TxError::InvalidProposalVote)?; @@ -1230,7 +1263,8 @@ pub async fn build_vote_proposal<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance @@ -1254,8 +1288,8 @@ pub async fn build_pgf_funding_proposal<'a>( Some(proposal.proposal.author.clone()), default_signer, ) - .await?; - + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1274,7 +1308,8 @@ pub async fn build_pgf_funding_proposal<'a>( &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance @@ -1298,8 +1333,8 @@ 
pub async fn build_pgf_stewards_proposal<'a>( Some(proposal.proposal.author.clone()), default_signer, ) - .await?; - + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1319,7 +1354,8 @@ pub async fn build_pgf_stewards_proposal<'a>( &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit an IBC transfer @@ -1336,16 +1372,23 @@ pub async fn build_ibc_transfer<'a>( ) .await?; // Check that the source address exists on chain - let source = - source_exists_or_err(args.source.clone(), args.tx.force, context.client) - .await?; + let source = source_exists_or_err( + args.source.clone(), + args.tx.force, + context.client, + ) + .await?; // We cannot check the receiver // validate the amount given - let validated_amount = - validate_amount(context.client, args.amount, &args.token, args.tx.force) - .await - .expect("expected to validate amount"); + let validated_amount = validate_amount( + context.client, + args.amount, + &args.token, + args.tx.force, + ) + .await + .expect("expected to validate amount"); if validated_amount.canonical().denom.0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", @@ -1371,17 +1414,22 @@ pub async fn build_ibc_transfer<'a>( token: args.token.clone(), }); - let tx_code_hash = - query_wasm_code_hash(context.client, args.tx_code_path.to_str().unwrap()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = query_wasm_code_hash( + context.client, + args.tx_code_path.to_str().unwrap(), + ) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; let ibc_denom = match &args.token { Address::Internal(InternalAddress::IbcToken(hash)) => { let ibc_denom_key = ibc_denom_key(hash); - rpc::query_storage_value::<_, 
String>(context.client, &ibc_denom_key) - .await - .map_err(|_e| TxError::TokenDoesNotExist(args.token.clone()))? + rpc::query_storage_value::<_, String>( + context.client, + &ibc_denom_key, + ) + .await + .map_err(|_e| TxError::TokenDoesNotExist(args.token.clone()))? } _ => args.token.to_string(), }; @@ -1499,9 +1547,10 @@ where let mut tx_builder = Tx::new(chain_id, tx_args.expiration); - let tx_code_hash = query_wasm_code_hash(context.client, path.to_string_lossy()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = + query_wasm_code_hash(context.client, path.to_string_lossy()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; on_tx(&mut tx_builder, &mut data)?; @@ -1526,8 +1575,10 @@ async fn add_asset_type<'a>( asset_type: AssetType, ) -> bool { let context = &mut **context; - if let Some(asset_type) = - context.shielded.decode_asset_type(context.client, asset_type).await + if let Some(asset_type) = context + .shielded + .decode_asset_type(context.client, asset_type) + .await { asset_types.insert(asset_type) } else { @@ -1545,35 +1596,27 @@ async fn used_asset_types<'a, P, R, K, N>( let mut asset_types = HashSet::new(); // Collect all the asset types used in the Sapling inputs for input in builder.sapling_inputs() { - add_asset_type(&mut asset_types, context, input.asset_type()) - .await; + add_asset_type(&mut asset_types, context, input.asset_type()).await; } // Collect all the asset types used in the transparent inputs for input in builder.transparent_inputs() { - add_asset_type( - &mut asset_types, - context, - input.coin().asset_type(), - ) - .await; + add_asset_type(&mut asset_types, context, input.coin().asset_type()) + .await; } // Collect all the asset types used in the Sapling outputs for output in builder.sapling_outputs() { - add_asset_type(&mut asset_types, context, output.asset_type()) - .await; + add_asset_type(&mut asset_types, context, output.asset_type()).await; } // Collect all 
the asset types used in the transparent outputs for output in builder.transparent_outputs() { - add_asset_type(&mut asset_types, context, output.asset_type()) - .await; + add_asset_type(&mut asset_types, context, output.asset_type()).await; } // Collect all the asset types used in the Sapling converts for output in builder.sapling_converts() { for (asset_type, _) in I32Sum::from(output.conversion().clone()).components() { - add_asset_type(&mut asset_types, context, *asset_type) - .await; + add_asset_type(&mut asset_types, context, *asset_type).await; } } Ok(asset_types) @@ -1591,8 +1634,8 @@ pub async fn build_transfer<'a, N: Namada<'a>>( Some(args.source.effective_address()), default_signer, ) - .await?; - + .await?; + let source = args.source.effective_address(); let target = args.target.effective_address(); let token = args.token.clone(); @@ -1606,7 +1649,8 @@ pub async fn build_transfer<'a, N: Namada<'a>>( // validate the amount given let validated_amount = - validate_amount(context.client, args.amount, &token, args.tx.force).await?; + validate_amount(context.client, args.amount, &token, args.tx.force) + .await?; args.amount = InputAmount::Validated(validated_amount); let post_balance = check_balance_too_low_err( @@ -1643,7 +1687,11 @@ pub async fn build_transfer<'a, N: Namada<'a>>( }; // Construct the shielded part of the transaction, if any - let stx_result = ShieldedContext::::gen_shielded_transfer(context, args).await; + let stx_result = + ShieldedContext::::gen_shielded_transfer( + context, args, + ) + .await; let shielded_parts = match stx_result { Ok(stx) => Ok(stx), @@ -1664,10 +1712,9 @@ pub async fn build_transfer<'a, N: Namada<'a>>( Some(transfer) => { // Get the decoded asset types used in the transaction to give // offline wallet users more information - let asset_types = - used_asset_types(context, &transfer.builder) - .await - .unwrap_or_default(); + let asset_types = used_asset_types(context, &transfer.builder) + .await + .unwrap_or_default(); 
Some(asset_types) } }; @@ -1756,8 +1803,9 @@ pub async fn build_init_account<'a>( ) -> Result<(Tx, SigningTxData, Option)> { let signing_data = signing::aux_signing_data(context, tx_args, None, None).await?; - - let vp_code_hash = query_wasm_code_hash_buf(context.client, vp_code_path).await?; + + let vp_code_hash = + query_wasm_code_hash_buf(context.client, vp_code_path).await?; let threshold = match threshold { Some(threshold) => *threshold, @@ -1791,7 +1839,8 @@ pub async fn build_init_account<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to update a VP @@ -1813,20 +1862,22 @@ pub async fn build_update_account<'a>( Some(addr.clone()), default_signer, ) - .await?; - - let addr = - if let Some(account) = rpc::get_account_info(context.client, addr).await? { - account.address - } else if tx_args.force { - addr.clone() - } else { - return Err(Error::from(TxError::LocationDoesNotExist(addr.clone()))); - }; + .await?; + + let addr = if let Some(account) = + rpc::get_account_info(context.client, addr).await? 
+ { + account.address + } else if tx_args.force { + addr.clone() + } else { + return Err(Error::from(TxError::LocationDoesNotExist(addr.clone()))); + }; let vp_code_hash = match vp_code_path { Some(code_path) => { - let vp_hash = query_wasm_code_hash_buf(context.client, code_path).await?; + let vp_hash = + query_wasm_code_hash_buf(context.client, code_path).await?; Some(vp_hash) } None => None, @@ -1859,7 +1910,8 @@ pub async fn build_update_account<'a>( &signing_data.fee_payer, None, ) - .await.map(|(tx, epoch)| (tx, signing_data, epoch)) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a custom transaction @@ -1880,8 +1932,8 @@ pub async fn build_custom<'a>( Some(owner.clone()), default_signer, ) - .await?; - + .await?; + let mut tx = if let Some(serialized_tx) = serialized_tx { Tx::deserialize(serialized_tx.as_ref()).map_err(|_| { Error::Other("Invalid tx deserialization.".to_string()) @@ -1889,7 +1941,8 @@ pub async fn build_custom<'a>( } else { let tx_code_hash = query_wasm_code_hash_buf( context.client, - code_path.as_ref() + code_path + .as_ref() .ok_or(Error::Other("No code path supplied".to_string()))?, ) .await?; diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index b763aec013..025f360c42 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -19,11 +19,10 @@ use std::time::{Duration, Instant}; use borsh::BorshSerialize; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; +use namada::sdk::masp::fs::FsShieldedUtils; use namada::types::address::Address; -use namada::types::io::StdIo; use namada::types::storage::Epoch; use namada::types::token; -use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::config::ethereum_bridge; use namada_apps::config::genesis::genesis_config::{ GenesisConfig, ParametersConfig, PgfParametersConfig, PosParamsConfig, @@ -688,7 +687,7 @@ fn ledger_txs_and_queries() -> Result<()> { #[test] fn masp_txs_and_queries() -> Result<()> { // Download 
the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -836,7 +835,7 @@ fn masp_txs_and_queries() -> Result<()> { #[test] fn wrapper_disposable_signer() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index 1cd08b9976..261d3acd08 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -2,8 +2,7 @@ use std::path::PathBuf; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; -use namada::types::io::StdIo; -use namada_apps::client::tx::CLIShieldedUtils; +use namada::sdk::masp::fs::FsShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; use namada_core::types::address::{btc, eth, masp_rewards}; @@ -29,7 +28,7 @@ fn masp_incentives() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -765,7 +764,7 @@ fn masp_pinned_txs() -> Result<()> { // This address doesn't matter for tests. But an argument is required. 
let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); let mut node = setup::setup()?; // Wait till epoch boundary @@ -928,7 +927,7 @@ fn masp_txs_and_queries() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); enum Response { Ok(&'static str), @@ -1234,7 +1233,7 @@ fn wrapper_fee_unshielding() { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. From 1142e6470a38191a4149c49c24be2aedfc48066e Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Thu, 28 Sep 2023 16:34:31 +0200 Subject: [PATCH 052/161] Adding saving and loading function to Wallet. 
--- Cargo.lock | 1 + apps/src/lib/cli/wallet.rs | 22 +- apps/src/lib/client/tx.rs | 10 +- apps/src/lib/client/utils.rs | 45 ++- apps/src/lib/wallet/cli_utils.rs | 22 +- apps/src/lib/wallet/mod.rs | 81 ++-- apps/src/lib/wallet/pre_genesis.rs | 4 +- apps/src/lib/wallet/store.rs | 71 +--- shared/Cargo.toml | 3 +- shared/src/ledger/mod.rs | 18 +- shared/src/sdk/signing.rs | 8 +- shared/src/sdk/tx.rs | 6 +- shared/src/sdk/wallet/keys.rs | 6 +- shared/src/sdk/wallet/mod.rs | 569 ++++++++++++++++++----------- shared/src/sdk/wallet/store.rs | 16 +- 15 files changed, 499 insertions(+), 383 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..d8303e37f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4026,6 +4026,7 @@ dependencies = [ "ethbridge-bridge-contract", "ethers", "eyre", + "fd-lock", "futures", "itertools", "libsecp256k1 0.7.0", diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index d039c7ef10..09d66096af 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -8,7 +8,7 @@ use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError}; +use namada::sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; @@ -356,6 +356,7 @@ fn key_and_address_restore( alias, alias_force, derivation_path, + None, encryption_password, ) .unwrap_or_else(|err| { @@ -393,21 +394,26 @@ fn key_and_address_gen( let mut rng = OsRng; let derivation_path_and_mnemonic_rng = derivation_path.map(|p| (p, &mut rng)); - let (alias, _key) = wallet + let (alias, _key, _mnemonic) = wallet .gen_key( scheme, alias, alias_force, + None, encryption_password, derivation_path_and_mnemonic_rng, ) .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); - cli::safe_exit(1); - }) - .unwrap_or_else(|| 
{ - display_line!(IO, "No changes are persisted. Exiting."); - cli::safe_exit(0); + match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. Exiting."); + cli::safe_exit(0); + }, + _ => { + eprintln!("{}", err); + cli::safe_exit(1); + } + } }); crate::wallet::save(&wallet) .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 1289492faa..2928ce755f 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -8,7 +8,7 @@ use namada::core::ledger::governance::cli::onchain::{ }; use namada::ledger::pos; use namada::sdk::rpc::{TxBroadcastData, TxResponse}; -use namada::sdk::wallet::{Wallet, WalletUtils}; +use namada::sdk::wallet::{Wallet, WalletIo}; use namada::ledger::{Namada, NamadaImpl}; use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; @@ -247,11 +247,11 @@ where SchemeType::Ed25519, Some(consensus_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key generation should not fail.") - .expect("No existing alias expected.") .1 }); @@ -273,11 +273,11 @@ where SchemeType::Secp256k1, Some(eth_cold_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key generation should not fail.") - .expect("No existing alias expected.") .1 .ref_to() }); @@ -300,11 +300,11 @@ where SchemeType::Secp256k1, Some(eth_hot_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key generation should not fail.") - .expect("No existing alias expected.") .1 .ref_to() }); @@ -1087,7 +1087,7 @@ where } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( +pub async fn save_initialized_accounts( wallet: &mut Wallet, args: &args::Tx, initialized_accounts: Vec
, diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 0caab25d35..7e243fd502 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -505,10 +505,9 @@ pub fn init_network( println!("Generating validator {} consensus key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .expect("Key generation should not fail."); // Write consensus key for Tendermint tendermint_node::write_validator_key(&tm_home_dir, &keypair); @@ -525,10 +524,9 @@ pub fn init_network( println!("Generating validator {} account key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -541,10 +539,9 @@ pub fn init_network( println!("Generating validator {} protocol signing key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -557,16 +554,16 @@ pub fn init_network( println!("Generating validator {} eth hot key...", 
name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Secp256k1, Some(alias), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -579,16 +576,16 @@ pub fn init_network( println!("Generating validator {} eth cold key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Secp256k1, Some(alias), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -675,16 +672,16 @@ pub fn init_network( ); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Ed25519, Some(name.clone()), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); let public_key = genesis_config::HexString(keypair.ref_to().to_string()); config.public_key = Some(public_key); @@ -938,16 +935,16 @@ fn init_established_account( println!("Generating established account {} key...", name.as_ref()); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Ed25519, Some(format!("{}-key", name.as_ref())), true, + None, password, None, // do not use mnemonic code / HD derivation path ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); let public_key = 
genesis_config::HexString(keypair.ref_to().to_string()); config.public_key = Some(public_key); diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index 72bb0acaab..fb288df193 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -5,7 +5,7 @@ use borsh::BorshSerialize; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError}; +use namada::sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; use namada::types::key::{PublicKeyHash, RefTo}; use namada::types::masp::{MaspValue, PaymentAddress}; use rand_core::OsRng; @@ -271,6 +271,7 @@ pub fn key_and_address_restore( alias, alias_force, derivation_path, + None, encryption_password, ) .unwrap_or_else(|err| { @@ -306,21 +307,26 @@ pub fn key_and_address_gen( let mut rng = OsRng; let derivation_path_and_mnemonic_rng = derivation_path.map(|p| (p, &mut rng)); - let (alias, _key) = wallet + let (alias, _key, _mnemonic) = wallet .gen_key( scheme, alias, alias_force, + None, encryption_password, derivation_path_and_mnemonic_rng, ) .unwrap_or_else(|err| { - eprintln!("{}", err); - cli::safe_exit(1); - }) - .unwrap_or_else(|| { - println!("No changes are persisted. Exiting."); - cli::safe_exit(0); + match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. 
Exiting."); + cli::safe_exit(0); + }, + _ => { + eprintln!("{}", err); + cli::safe_exit(1); + } + } }); crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); println!( diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index f6611ebe18..01967a5c24 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -12,7 +12,7 @@ use namada::bip39::{Language, Mnemonic}; pub use namada::sdk::wallet::alias::Alias; use namada::sdk::wallet::{ AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, - Wallet, WalletUtils, + Wallet, WalletIo, }; pub use namada::sdk::wallet::{ValidatorData, ValidatorKeys}; use namada::types::address::Address; @@ -20,36 +20,34 @@ use namada::types::key::*; use rand_core::OsRng; pub use store::wallet_file; use zeroize::Zeroizing; +use namada::sdk::wallet::store::Store; +use namada::sdk::wallet::fs::FsWalletStorage; use crate::cli; use crate::config::genesis::genesis_config::GenesisConfig; -#[derive(Debug)] -pub struct CliWalletUtils; +#[derive(Debug, Clone)] +pub struct CliWalletUtils { + store_dir: PathBuf, +} -impl WalletUtils for CliWalletUtils { - type Rng = OsRng; - type Storage = PathBuf; +impl CliWalletUtils { + /// Initialize a wallet at the given directory + pub fn new(store_dir: PathBuf) -> Wallet { + Wallet::new(Self { store_dir }, Store::default()) + } +} - fn read_decryption_password() -> Zeroizing { - match env::var("NAMADA_WALLET_PASSWORD_FILE") { - Ok(path) => Zeroizing::new( - fs::read_to_string(path) - .expect("Something went wrong reading the file"), - ), - Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { - Ok(password) => Zeroizing::new(password), - Err(_) => { - let prompt = "Enter your decryption password: "; - rpassword::read_password_from_tty(Some(prompt)) - .map(Zeroizing::new) - .expect("Failed reading password from tty.") - } - }, - } +impl FsWalletStorage for CliWalletUtils { + fn store_dir(&self) -> &PathBuf { + &self.store_dir } +} + +impl WalletIo 
for CliWalletUtils { + type Rng = OsRng; - fn read_encryption_password() -> Zeroizing { + fn read_password(confirm: bool) -> Zeroizing { let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { Ok(path) => Zeroizing::new( fs::read_to_string(path) @@ -57,7 +55,7 @@ impl WalletUtils for CliWalletUtils { ), Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { Ok(password) => Zeroizing::new(password), - Err(_) => { + Err(_) if confirm => { let prompt = "Enter your encryption password: "; read_and_confirm_passphrase_tty(prompt).unwrap_or_else( |e| { @@ -68,10 +66,16 @@ impl WalletUtils for CliWalletUtils { cli::safe_exit(1) }, ) + }, + Err(_) => { + let prompt = "Enter your decryption password: "; + rpassword::read_password_from_tty(Some(prompt)) + .map(Zeroizing::new) + .expect("Failed reading password from tty.") } }, }; - if pwd.as_str().is_empty() { + if confirm && pwd.as_str().is_empty() { eprintln!("Password cannot be empty"); eprintln!("Action cancelled, no changes persisted."); cli::safe_exit(1) @@ -190,7 +194,7 @@ pub fn read_and_confirm_passphrase_tty( /// for signing protocol txs and for the DKG (which will also be stored) /// A protocol keypair may be optionally provided, indicating that /// we should re-use a keypair already in the wallet -pub fn gen_validator_keys( +pub fn gen_validator_keys( wallet: &mut Wallet, eth_bridge_pk: Option, protocol_pk: Option, @@ -221,7 +225,7 @@ fn find_secret_key( ) -> Result, FindKeyError> where F: Fn(&ValidatorData) -> common::SecretKey, - U: WalletUtils, + U: WalletIo, { maybe_pk .map(|pk| { @@ -254,19 +258,18 @@ pub fn add_genesis_addresses( /// Save the wallet store to a file. pub fn save(wallet: &Wallet) -> std::io::Result<()> { - self::store::save(wallet.store(), wallet.store_dir()) + wallet.save() + .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) } /// Load a wallet from the store file. 
pub fn load(store_dir: &Path) -> Option> { - let store = self::store::load(store_dir).unwrap_or_else(|err| { + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.load().unwrap_or_else(|err| { eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Some(Wallet::::new( - store_dir.to_path_buf(), - store, - )) + Some(wallet) } /// Load a wallet from the store file or create a new wallet without any @@ -276,7 +279,9 @@ pub fn load_or_new(store_dir: &Path) -> Wallet { eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Wallet::::new(store_dir.to_path_buf(), store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet } /// Load a wallet from the store file or create a new one with the default @@ -290,7 +295,9 @@ pub fn load_or_new_from_genesis( eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Wallet::::new(store_dir.to_path_buf(), store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet } /// Read the password for encryption from the file/env/stdin, with @@ -302,14 +309,14 @@ pub fn read_and_confirm_encryption_password( println!("Warning: The keypair will NOT be encrypted."); None } else { - Some(CliWalletUtils::read_encryption_password()) + Some(CliWalletUtils::read_password(true)) } } #[cfg(test)] mod tests { use namada::bip39::MnemonicType; - use namada::sdk::wallet::WalletUtils; + use namada::sdk::wallet::WalletIo; use rand_core; use super::CliWalletUtils; diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 21a80267f1..13a2c21f2b 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -6,7 +6,7 @@ use fd_lock::RwLock; use namada::sdk::wallet::pre_genesis::{ ReadError, ValidatorStore, ValidatorWallet, }; -use namada::sdk::wallet::{gen_key_to_store, WalletUtils}; +use namada::sdk::wallet::{gen_key_to_store, 
WalletIo}; use namada::types::key::SchemeType; use zeroize::Zeroizing; @@ -75,7 +75,7 @@ pub fn load(store_dir: &Path) -> Result { || store.consensus_key.is_encrypted() || store.account_key.is_encrypted() { - Some(CliWalletUtils::read_decryption_password()) + Some(CliWalletUtils::read_password(false)) } else { None }; diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index 0f2aa86b7b..24c62e4866 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -1,13 +1,9 @@ -use std::fs; -use std::io::prelude::*; -use std::io::Write; use std::path::{Path, PathBuf}; #[cfg(not(feature = "dev"))] use std::str::FromStr; use ark_std::rand::prelude::*; use ark_std::rand::SeedableRng; -use fd_lock::RwLock; #[cfg(not(feature = "dev"))] use namada::sdk::wallet::store::AddressVpType; #[cfg(feature = "dev")] @@ -17,21 +13,11 @@ use namada::sdk::wallet::{gen_sk_rng, Store, ValidatorKeys}; use namada::types::address::Address; use namada::types::key::*; use namada::types::transaction::EllipticCurve; -use thiserror::Error; +use namada::sdk::wallet::LoadStoreError; use crate::config::genesis::genesis_config::GenesisConfig; use crate::wallet::CliWalletUtils; -#[derive(Error, Debug)] -pub enum LoadStoreError { - #[error("Failed decoding the wallet store: {0}")] - Decode(toml::de::Error), - #[error("Failed to read the wallet store from {0}: {1}")] - ReadWallet(String, String), - #[error("Failed to write the wallet store: {0}")] - StoreNewWallet(String), -} - /// Wallet file name const FILE_NAME: &str = "wallet.toml"; @@ -40,28 +26,12 @@ pub fn wallet_file(store_dir: impl AsRef) -> PathBuf { store_dir.as_ref().join(FILE_NAME) } -/// Save the wallet store to a file. 
-pub fn save(store: &Store, store_dir: &Path) -> std::io::Result<()> { - let data = store.encode(); - let wallet_path = wallet_file(store_dir); - // Make sure the dir exists - let wallet_dir = wallet_path.parent().unwrap(); - fs::create_dir_all(wallet_dir)?; - // Write the file - let mut options = fs::OpenOptions::new(); - options.create(true).write(true).truncate(true); - let mut lock = RwLock::new(options.open(wallet_path)?); - let mut guard = lock.write()?; - guard.write_all(&data) -} - /// Load the store file or create a new one without any keys or addresses. pub fn load_or_new(store_dir: &Path) -> Result { load(store_dir).or_else(|_| { - let store = Store::default(); - save(&store, store_dir) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; - Ok(store) + let wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.save()?; + Ok(wallet.into()) }) } @@ -80,37 +50,18 @@ pub fn load_or_new_from_genesis( let _ = genesis_cfg; new() }; - save(&store, store_dir) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; - Ok(store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet.save()?; + Ok(wallet.into()) }) } /// Attempt to load the store file. 
pub fn load(store_dir: &Path) -> Result { - let wallet_file = wallet_file(store_dir); - let mut options = fs::OpenOptions::new(); - options.read(true).write(false); - let lock = RwLock::new(options.open(&wallet_file).map_err(|err| { - LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - ) - })?); - let guard = lock.read().map_err(|err| { - LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - ) - })?; - let mut store = Vec::::new(); - (&*guard).read_to_end(&mut store).map_err(|err| { - LoadStoreError::ReadWallet( - store_dir.to_str().unwrap().parse().unwrap(), - err.to_string(), - ) - })?; - Store::decode(store).map_err(LoadStoreError::Decode) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.load()?; + Ok(wallet.into()) } /// Add addresses from a genesis configuration. diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 3259afd2c7..660a810087 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -19,7 +19,7 @@ default = ["abciplus", "namada-sdk", "wasm-runtime"] mainnet = [ "namada_core/mainnet", ] -std = [] +std = ["fd-lock"] # NOTE "dev" features that shouldn't be used in live networks are enabled by default for now dev = [] ferveo-tpke = [ @@ -107,6 +107,7 @@ derivative.workspace = true ethbridge-bridge-contract.workspace = true ethers.workspace = true eyre.workspace = true +fd-lock = { workspace = true, optional = true } futures.workspace = true itertools.workspace = true loupe = {version = "0.1.3", optional = true} diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 7686635f98..e952c99a08 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -22,7 +22,7 @@ pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; -use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::sdk::wallet::{Wallet, WalletIo, WalletStorage}; use crate::sdk::masp::{ShieldedContext, 
ShieldedUtils}; use crate::types::masp::{TransferSource, TransferTarget}; use crate::types::address::Address; @@ -50,7 +50,7 @@ use namada_core::types::ethereum_events::EthAddress; pub struct NamadaStruct<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { /// Used to send and receive messages from the ledger @@ -76,7 +76,7 @@ pub trait Namada<'a>: /// A client with async request dispatcher method type Client: 'a + crate::ledger::queries::Client + Sync; /// Captures the interactive parts of the wallet's functioning - type WalletUtils: 'a + WalletUtils; + type WalletUtils: 'a + WalletIo + WalletStorage; /// Abstracts platform specific details away from the logic of shielded pool /// operations. type ShieldedUtils: 'a + ShieldedUtils; @@ -391,7 +391,7 @@ pub trait Namada<'a>: pub struct NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { namada: NamadaStruct<'a, C, U, V>, @@ -401,7 +401,7 @@ where impl<'a, C, U, V> NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { /// Construct a new Namada context @@ -451,7 +451,7 @@ where impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { type Target = NamadaStruct<'a, C, U, V>; @@ -464,7 +464,7 @@ where impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { fn deref_mut(&mut self) -> &mut Self::Target { @@ -475,7 +475,7 @@ where impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo + WalletStorage, V: ShieldedUtils, { type Client = C; @@ -492,7 +492,7 @@ where impl<'a, C, U, V> args::TxBuilder for NamadaImpl<'a, C, U, V> where C: 
crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, V: ShieldedUtils, { fn tx(self, func: F) -> Self diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index aa2ac368f6..24f3b12c7b 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -41,7 +41,7 @@ use crate::sdk::tx::{ VP_USER_WASM, }; pub use crate::sdk::wallet::store::AddressVpType; -use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::sdk::wallet::{Wallet, WalletIo}; use crate::sdk::{args, rpc}; use crate::types::io::*; use crate::sdk::args::SdkTypes; @@ -85,7 +85,7 @@ pub struct SigningTxData { /// found or loaded. pub async fn find_pk< C: crate::ledger::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, >( client: &C, wallet: &mut Wallet, @@ -127,7 +127,7 @@ pub async fn find_pk< /// Load the secret key corresponding to the given public key from the wallet. /// If the keypair is encrypted but a password is not supplied, then it is /// interactively prompted. Errors if the key cannot be found or loaded. -pub fn find_key_by_pk( +pub fn find_key_by_pk( wallet: &mut Wallet, args: &args::Tx, public_key: &common::PublicKey, @@ -201,7 +201,7 @@ pub async fn tx_signers<'a>( /// hashes needed for monitoring the tx on chain. /// /// If it is a dry run, it is not put in a wrapper, but returned as is. 
-pub fn sign_tx( +pub fn sign_tx( wallet: &mut Wallet, args: &args::Tx, tx: &mut Tx, diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index e7a86a5d3b..f718084709 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -49,7 +49,7 @@ use crate::sdk::rpc::{ self, format_denominated_amount, validate_amount, TxBroadcastData, TxResponse, query_wasm_code_hash }; -use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::sdk::wallet::{Wallet, WalletIo}; use crate::ledger::Namada; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; @@ -187,7 +187,7 @@ pub async fn prepare_tx<'a>( /// initialized in the transaction if any. In dry run, this is always empty. pub async fn process_tx< C: crate::sdk::queries::Client + Sync, - U: WalletUtils, + U: WalletIo, >( client: &C, wallet: &mut Wallet, @@ -450,7 +450,7 @@ pub fn decode_component( } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( +pub async fn save_initialized_accounts( wallet: &mut Wallet, args: &args::Tx, initialized_accounts: Vec
, diff --git a/shared/src/sdk/wallet/keys.rs b/shared/src/sdk/wallet/keys.rs index 867a2b1ad0..6b10352c8b 100644 --- a/shared/src/sdk/wallet/keys.rs +++ b/shared/src/sdk/wallet/keys.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::sdk::wallet::WalletUtils; +use crate::sdk::wallet::WalletIo; const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; @@ -166,7 +166,7 @@ where /// Get a raw keypair from a stored keypair. If the keypair is encrypted and /// no password is provided in the argument, a password will be prompted /// from stdin. - pub fn get( + pub fn get( &self, decrypt: bool, password: Option>, @@ -175,7 +175,7 @@ where StoredKeypair::Encrypted(encrypted_keypair) => { if decrypt { let password = password - .unwrap_or_else(|| U::read_decryption_password()); + .unwrap_or_else(|| U::read_password(false)); let key = encrypted_keypair.decrypt(password)?; Ok(key) } else { diff --git a/shared/src/sdk/wallet/mod.rs b/shared/src/sdk/wallet/mod.rs index 371c97806b..580b2db5a1 100644 --- a/shared/src/sdk/wallet/mod.rs +++ b/shared/src/sdk/wallet/mod.rs @@ -40,12 +40,13 @@ pub enum GenRestoreKeyError { /// Mnemonic input error #[error("Mnemonic input error")] MnemonicInputError, + /// Key storage error + #[error("Key storage error")] + KeyStorageError, } /// Captures the interactive parts of the wallet's functioning -pub trait WalletUtils { - /// The location where the wallet is stored - type Storage; +pub trait WalletIo: Sized + Clone { /// Secure random number generator type Rng: RngCore; @@ -67,29 +68,150 @@ pub trait WalletUtils { } /// Read the password for decryption from the file/env/stdin. - fn read_decryption_password() -> Zeroizing; - - /// Read the password for encryption from the file/env/stdin. - /// If the password is read from stdin, the implementation is expected - /// to ask for a confirmation. 
- fn read_encryption_password() -> Zeroizing; + fn read_password(_confirm: bool) -> Zeroizing { + panic!("attempted to prompt for password in non-interactive mode"); + } /// Read an alias from the file/env/stdin. - fn read_alias(prompt_msg: &str) -> String; + fn read_alias(_prompt_msg: &str) -> String { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// Read mnemonic code from the file/env/stdin. - fn read_mnemonic_code() -> Result; + fn read_mnemonic_code() -> Result { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// Read a mnemonic code from the file/env/stdin. - fn read_mnemonic_passphrase(confirm: bool) -> Zeroizing; + fn read_mnemonic_passphrase(_confirm: bool) -> Zeroizing { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// The given alias has been selected but conflicts with another alias in /// the store. Offer the user to either replace existing mapping, alter the /// chosen alias to a name of their choice, or cancel the aliasing. fn show_overwrite_confirmation( - alias: &Alias, - alias_for: &str, - ) -> store::ConfirmationResponse; + _alias: &Alias, + _alias_for: &str, + ) -> store::ConfirmationResponse { + // Automatically replace aliases in non-interactive mode + store::ConfirmationResponse::Replace + } +} + +/// Errors of wallet loading and storing +#[derive(Error, Debug)] +pub enum LoadStoreError { + /// Wallet store decoding error + #[error("Failed decoding the wallet store: {0}")] + Decode(toml::de::Error), + /// Wallet store reading error + #[error("Failed to read the wallet store from {0}: {1}")] + ReadWallet(String, String), + /// Wallet store writing error + #[error("Failed to write the wallet store: {0}")] + StoreNewWallet(String), +} + +/// Captures the permanent storage parts of the wallet's functioning +pub trait WalletStorage: Sized + Clone { + /// Save the wallet store to a file. 
+ fn save(&self, wallet: &Wallet) -> Result<(), LoadStoreError>; + + /// Load a wallet from the store file. + fn load(&self, wallet: &mut Wallet) -> Result<(), LoadStoreError>; +} + +#[cfg(feature = "std")] +/// Implementation of wallet functionality depending on a standard filesystem +pub mod fs { + use super::*; + use std::fs; + use fd_lock::RwLock; + use std::path::PathBuf; + use rand_core::OsRng; + use std::io::{Read, Write}; + + /// A trait for deriving WalletStorage for standard filesystems + pub trait FsWalletStorage: Clone { + /// The directory in which the wallet is supposed to be stored + fn store_dir(&self) -> &PathBuf; + } + + /// Wallet file name + const FILE_NAME: &str = "wallet.toml"; + + impl WalletStorage for F { + fn save(&self, wallet: &Wallet) -> Result<(), LoadStoreError> { + let data = wallet.store.encode(); + let wallet_path = self.store_dir().join(FILE_NAME); + // Make sure the dir exists + let wallet_dir = wallet_path.parent().unwrap(); + fs::create_dir_all(wallet_dir) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; + // Write the file + let mut options = fs::OpenOptions::new(); + options.create(true).write(true).truncate(true); + let mut lock = RwLock::new( + options.open(wallet_path) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?, + ); + let mut guard = lock.write() + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; + guard.write_all(&data).map_err(|err| LoadStoreError::StoreNewWallet(err.to_string())) + } + + fn load(&self, wallet: &mut Wallet) -> Result<(), LoadStoreError> { + let wallet_file = self.store_dir().join(FILE_NAME); + let mut options = fs::OpenOptions::new(); + options.read(true).write(false); + let lock = RwLock::new(options.open(&wallet_file).map_err(|err| { + LoadStoreError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + ) + })?); + let guard = lock.read().map_err(|err| { + LoadStoreError::ReadWallet( + 
wallet_file.to_string_lossy().into_owned(), + err.to_string(), + ) + })?; + let mut store = Vec::::new(); + (&*guard).read_to_end(&mut store).map_err(|err| { + LoadStoreError::ReadWallet( + self.store_dir().to_str().unwrap().parse().unwrap(), + err.to_string(), + ) + })?; + wallet.store = Store::decode(store).map_err(LoadStoreError::Decode)?; + Ok(()) + } + } + + /// For a non-interactive filesystem based wallet + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + pub struct FsWalletUtils { + #[borsh_skip] + store_dir: PathBuf, + } + + impl FsWalletUtils { + /// Initialize a wallet at the given directory + pub fn new(store_dir: PathBuf) -> Wallet { + Wallet::new(Self { store_dir }, Store::default()) + } + } + + impl WalletIo for FsWalletUtils { + type Rng = OsRng; + } + + impl FsWalletStorage for FsWalletUtils { + fn store_dir(&self) -> &PathBuf { + &self.store_dir + } + } } /// The error that is produced when a given key cannot be obtained @@ -105,24 +227,217 @@ pub enum FindKeyError { /// Represents a collection of keys and addresses while caching key decryptions #[derive(Debug)] -pub struct Wallet { - store_dir: U::Storage, +pub struct Wallet { + /// Location where this shielded context is saved + utils: U, store: Store, decrypted_key_cache: HashMap, decrypted_spendkey_cache: HashMap, } -impl Wallet { +impl From> for Store { + fn from(wallet: Wallet) -> Self { + wallet.store + } +} + +impl Wallet { /// Create a new wallet from the given backing store and storage location - pub fn new(store_dir: U::Storage, store: Store) -> Self { + pub fn new(utils: U, store: Store) -> Self { Self { - store_dir, + utils, store, decrypted_key_cache: HashMap::default(), decrypted_spendkey_cache: HashMap::default(), } } + /// Add validator data to the store + pub fn add_validator_data( + &mut self, + address: Address, + keys: ValidatorKeys, + ) { + self.store.add_validator_data(address, keys); + } + + /// Returns a reference to the validator data, if it exists. 
+ pub fn get_validator_data(&self) -> Option<&ValidatorData> { + self.store.get_validator_data() + } + + /// Returns a mut reference to the validator data, if it exists. + pub fn get_validator_data_mut(&mut self) -> Option<&mut ValidatorData> { + self.store.get_validator_data_mut() + } + + /// Take the validator data, if it exists. + pub fn take_validator_data(&mut self) -> Option { + self.store.take_validator_data() + } + + /// Returns the validator data, if it exists. + pub fn into_validator_data(self) -> Option { + self.store.into_validator_data() + } + + /// Provide immutable access to the backing store + pub fn store(&self) -> &Store { + &self.store + } + + /// Provide mutable access to the backing store + pub fn store_mut(&mut self) -> &mut Store { + &mut self.store + } + + /// Extend this wallet from pre-genesis validator wallet. + pub fn extend_from_pre_genesis_validator( + &mut self, + validator_address: Address, + validator_alias: Alias, + other: pre_genesis::ValidatorWallet, + ) { + self.store.extend_from_pre_genesis_validator( + validator_address, + validator_alias, + other, + ) + } + + /// Gets all addresses given a vp_type + pub fn get_addresses_with_vp_type( + &self, + vp_type: AddressVpType, + ) -> HashSet
{ + self.store.get_addresses_with_vp_type(vp_type) + } + + /// Add a vp_type to a given address + pub fn add_vp_type_to_address( + &mut self, + vp_type: AddressVpType, + address: Address, + ) { + // defaults to an empty set + self.store.add_vp_type_to_address(vp_type, address) + } + + /// Get addresses with tokens VP type keyed and ordered by their aliases. + pub fn tokens_with_aliases(&self) -> BTreeMap { + self.get_addresses_with_vp_type(AddressVpType::Token) + .into_iter() + .map(|addr| { + let alias = self.lookup_alias(&addr); + (alias, addr) + }) + .collect() + } + + /// Find the stored address by an alias. + pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { + self.store.find_address(alias) + } + + /// Find an alias by the address if it's in the wallet. + pub fn find_alias(&self, address: &Address) -> Option<&Alias> { + self.store.find_alias(address) + } + + /// Try to find an alias for a given address from the wallet. If not found, + /// formats the address into a string. + pub fn lookup_alias(&self, addr: &Address) -> String { + match self.find_alias(addr) { + Some(alias) => format!("{}", alias), + None => format!("{}", addr), + } + } + + /// Find the viewing key with the given alias in the wallet and return it + pub fn find_viewing_key( + &mut self, + alias: impl AsRef, + ) -> Result<&ExtendedViewingKey, FindKeyError> { + self.store + .find_viewing_key(alias.as_ref()) + .ok_or(FindKeyError::KeyNotFound) + } + + /// Find the payment address with the given alias in the wallet and return + /// it + pub fn find_payment_addr( + &self, + alias: impl AsRef, + ) -> Option<&PaymentAddress> { + self.store.find_payment_addr(alias.as_ref()) + } + + /// Get all known keys by their alias, paired with PKH, if known. 
+ pub fn get_keys( + &self, + ) -> HashMap< + String, + (&StoredKeypair, Option<&PublicKeyHash>), + > { + self.store + .get_keys() + .into_iter() + .map(|(alias, value)| (alias.into(), value)) + .collect() + } + + /// Get all known addresses by their alias, paired with PKH, if known. + pub fn get_addresses(&self) -> HashMap { + self.store + .get_addresses() + .iter() + .map(|(alias, value)| (alias.into(), value.clone())) + .collect() + } + + /// Get all known payment addresses by their alias + pub fn get_payment_addrs(&self) -> HashMap { + self.store + .get_payment_addrs() + .iter() + .map(|(alias, value)| (alias.into(), *value)) + .collect() + } + + /// Get all known viewing keys by their alias + pub fn get_viewing_keys(&self) -> HashMap { + self.store + .get_viewing_keys() + .iter() + .map(|(alias, value)| (alias.into(), *value)) + .collect() + } + + /// Get all known viewing keys by their alias + pub fn get_spending_keys( + &self, + ) -> HashMap> { + self.store + .get_spending_keys() + .iter() + .map(|(alias, value)| (alias.into(), value)) + .collect() + } +} + +impl Wallet { + /// Load a wallet from the store file. + pub fn load(&mut self) -> Result<(), LoadStoreError> { + self.utils.clone().load(self) + } + + /// Save the wallet store to a file. 
+ pub fn save(&self) -> Result<(), LoadStoreError> { + self.utils.save(self) + } +} + +impl Wallet { fn gen_and_store_key( &mut self, scheme: SchemeType, @@ -161,6 +476,7 @@ impl Wallet { alias: Option, alias_force: bool, derivation_path: Option, + mnemonic_passphrase: Option<(Mnemonic, Zeroizing)>, password: Option>, ) -> Result, GenRestoreKeyError> { let parsed_derivation_path = derivation_path @@ -182,8 +498,12 @@ impl Wallet { ) } println!("Using HD derivation path {}", parsed_derivation_path); - let mnemonic = U::read_mnemonic_code()?; - let passphrase = U::read_mnemonic_passphrase(false); + let (mnemonic, passphrase) = + if let Some(mnemonic_passphrase) = mnemonic_passphrase { + mnemonic_passphrase + } else { + (U::read_mnemonic_code()?, U::read_mnemonic_passphrase(false)) + }; let seed = Seed::new(&mnemonic, &passphrase); Ok(self.gen_and_store_key( @@ -212,9 +532,10 @@ impl Wallet { scheme: SchemeType, alias: Option, alias_force: bool, + passphrase: Option>, password: Option>, derivation_path_and_mnemonic_rng: Option<(String, &mut U::Rng)>, - ) -> Result, GenRestoreKeyError> { + ) -> Result<(String, common::SecretKey, Option), GenRestoreKeyError> { let parsed_path_and_rng = derivation_path_and_mnemonic_rng .map(|(raw_derivation_path, rng)| { let is_default = @@ -242,27 +563,31 @@ impl Wallet { println!("Using HD derivation path {}", parsed_derivation_path); } + let mut mnemonic_opt = None; let seed_and_derivation_path //: Option> = parsed_path_and_rng.map(|(path, rng)| { const MNEMONIC_TYPE: MnemonicType = MnemonicType::Words24; - let mnemonic = U::generate_mnemonic_code(MNEMONIC_TYPE, rng)?; + let mnemonic = mnemonic_opt + .insert(U::generate_mnemonic_code(MNEMONIC_TYPE, rng)?); println!( "Safely store your {} words mnemonic.", MNEMONIC_TYPE.word_count() ); println!("{}", mnemonic.clone().into_phrase()); - let passphrase = U::read_mnemonic_passphrase(true); - Ok((Seed::new(&mnemonic, &passphrase), path)) + let passphrase = passphrase + .unwrap_or_else(|| 
U::read_mnemonic_passphrase(true)); + Ok((Seed::new(mnemonic, &passphrase), path)) }).transpose()?; - Ok(self.gen_and_store_key( + let (alias, key) = self.gen_and_store_key( scheme, alias, alias_force, seed_and_derivation_path, password, - )) + ).ok_or(GenRestoreKeyError::KeyStorageError)?; + Ok((alias, key, mnemonic_opt)) } /// Generate a disposable signing key for fee payment and store it under the @@ -280,10 +605,9 @@ impl Wallet { // Generate a disposable keypair to sign the wrapper if requested // TODO: once the wrapper transaction has been accepted, this key can be // deleted from wallet - let (alias, disposable_keypair) = self - .gen_key(SchemeType::Ed25519, Some(alias), false, None, None) - .expect("Failed to initialize disposable keypair") - .expect("Missing alias and secret key"); + let (alias, disposable_keypair, _mnemonic) = self + .gen_key(SchemeType::Ed25519, Some(alias), false, None, None, None) + .expect("Failed to initialize disposable keypair"); println!("Created disposable keypair with alias {alias}"); disposable_keypair @@ -304,35 +628,6 @@ impl Wallet { (alias.into(), key) } - /// Add validator data to the store - pub fn add_validator_data( - &mut self, - address: Address, - keys: ValidatorKeys, - ) { - self.store.add_validator_data(address, keys); - } - - /// Returns a reference to the validator data, if it exists. - pub fn get_validator_data(&self) -> Option<&ValidatorData> { - self.store.get_validator_data() - } - - /// Returns a mut reference to the validator data, if it exists. - pub fn get_validator_data_mut(&mut self) -> Option<&mut ValidatorData> { - self.store.get_validator_data_mut() - } - - /// Take the validator data, if it exists. - pub fn take_validator_data(&mut self) -> Option { - self.store.take_validator_data() - } - - /// Returns the validator data, if it exists. - pub fn into_validator_data(self) -> Option { - self.store.into_validator_data() - } - /// Find the stored key by an alias, a public key hash or a public key. 
/// If the key is encrypted and password not supplied, then password will be /// interactively prompted. Any keys that are decrypted are stored in and @@ -389,25 +684,6 @@ impl Wallet { ) } - /// Find the viewing key with the given alias in the wallet and return it - pub fn find_viewing_key( - &mut self, - alias: impl AsRef, - ) -> Result<&ExtendedViewingKey, FindKeyError> { - self.store - .find_viewing_key(alias.as_ref()) - .ok_or(FindKeyError::KeyNotFound) - } - - /// Find the payment address with the given alias in the wallet and return - /// it - pub fn find_payment_addr( - &self, - alias: impl AsRef, - ) -> Option<&PaymentAddress> { - self.store.find_payment_addr(alias.as_ref()) - } - /// Find the stored key by a public key. /// If the key is encrypted and password not supplied, then password will be /// interactively prompted for. Any keys that are decrypted are stored in @@ -463,7 +739,7 @@ impl Wallet { .store .find_key_by_pkh(pkh) .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key::<_>( + Self::decrypt_stored_key( &mut self.decrypted_key_cache, stored_key, alias, @@ -489,7 +765,7 @@ impl Wallet { match stored_key { StoredKeypair::Encrypted(encrypted) => { let password = - password.unwrap_or_else(U::read_decryption_password); + password.unwrap_or_else(|| U::read_password(false)); let key = encrypted .decrypt(password) .map_err(FindKeyError::KeyDecryptionError)?; @@ -503,77 +779,6 @@ impl Wallet { } } - /// Get all known keys by their alias, paired with PKH, if known. - pub fn get_keys( - &self, - ) -> HashMap< - String, - (&StoredKeypair, Option<&PublicKeyHash>), - > { - self.store - .get_keys() - .into_iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - - /// Find the stored address by an alias. - pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { - self.store.find_address(alias) - } - - /// Find an alias by the address if it's in the wallet. 
- pub fn find_alias(&self, address: &Address) -> Option<&Alias> { - self.store.find_alias(address) - } - - /// Try to find an alias for a given address from the wallet. If not found, - /// formats the address into a string. - pub fn lookup_alias(&self, addr: &Address) -> String { - match self.find_alias(addr) { - Some(alias) => format!("{}", alias), - None => format!("{}", addr), - } - } - - /// Get all known addresses by their alias, paired with PKH, if known. - pub fn get_addresses(&self) -> HashMap { - self.store - .get_addresses() - .iter() - .map(|(alias, value)| (alias.into(), value.clone())) - .collect() - } - - /// Get all known payment addresses by their alias - pub fn get_payment_addrs(&self) -> HashMap { - self.store - .get_payment_addrs() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_viewing_keys(&self) -> HashMap { - self.store - .get_viewing_keys() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_spending_keys( - &self, - ) -> HashMap> { - self.store - .get_spending_keys() - .iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - /// Add a new address with the given alias. If the alias is already used, /// will ask whether the existing alias should be replaced, a different /// alias is desired, or the alias creation should be cancelled. Return @@ -664,62 +869,4 @@ impl Wallet { .insert_payment_addr::(alias.into(), payment_addr, force_alias) .map(Into::into) } - - /// Extend this wallet from pre-genesis validator wallet. 
- pub fn extend_from_pre_genesis_validator( - &mut self, - validator_address: Address, - validator_alias: Alias, - other: pre_genesis::ValidatorWallet, - ) { - self.store.extend_from_pre_genesis_validator( - validator_address, - validator_alias, - other, - ) - } - - /// Gets all addresses given a vp_type - pub fn get_addresses_with_vp_type( - &self, - vp_type: AddressVpType, - ) -> HashSet
{ - self.store.get_addresses_with_vp_type(vp_type) - } - - /// Add a vp_type to a given address - pub fn add_vp_type_to_address( - &mut self, - vp_type: AddressVpType, - address: Address, - ) { - // defaults to an empty set - self.store.add_vp_type_to_address(vp_type, address) - } - - /// Provide immutable access to the backing store - pub fn store(&self) -> &Store { - &self.store - } - - /// Provide mutable access to the backing store - pub fn store_mut(&mut self) -> &mut Store { - &mut self.store - } - - /// Access storage location data - pub fn store_dir(&self) -> &U::Storage { - &self.store_dir - } - - /// Get addresses with tokens VP type keyed and ordered by their aliases. - pub fn tokens_with_aliases(&self) -> BTreeMap { - self.get_addresses_with_vp_type(AddressVpType::Token) - .into_iter() - .map(|addr| { - let alias = self.lookup_alias(&addr); - (alias, addr) - }) - .collect() - } } diff --git a/shared/src/sdk/wallet/store.rs b/shared/src/sdk/wallet/store.rs index 509ff5afe6..b674391127 100644 --- a/shared/src/sdk/wallet/store.rs +++ b/shared/src/sdk/wallet/store.rs @@ -17,7 +17,7 @@ use zeroize::Zeroizing; use super::alias::{self, Alias}; use super::derivation_path::DerivationPath; use super::pre_genesis; -use crate::sdk::wallet::{StoredKeypair, WalletUtils}; +use crate::sdk::wallet::{StoredKeypair, WalletIo}; use crate::types::address::{Address, ImplicitAddress}; use crate::types::key::dkg_session_keys::DkgKeypair; use crate::types::key::*; @@ -239,7 +239,7 @@ impl Store { /// key. /// Returns None if the alias already exists and the user decides to skip /// it. No changes in the wallet store are made. 
- pub fn gen_key( + pub fn gen_key( &mut self, scheme: SchemeType, alias: Option, @@ -277,7 +277,7 @@ impl Store { } /// Generate a spending key similarly to how it's done for keypairs - pub fn gen_spending_key( + pub fn gen_spending_key( &mut self, alias: String, password: Option>, @@ -335,7 +335,7 @@ impl Store { /// will prompt for overwrite/reselection confirmation. If declined, then /// keypair is not inserted and nothing is returned, otherwise selected /// alias is returned. - pub fn insert_keypair( + pub fn insert_keypair( &mut self, alias: Alias, keypair: StoredKeypair, @@ -388,7 +388,7 @@ impl Store { } /// Insert spending keys similarly to how it's done for keypairs - pub fn insert_spending_key( + pub fn insert_spending_key( &mut self, alias: Alias, spendkey: StoredKeypair, @@ -418,7 +418,7 @@ impl Store { } /// Insert viewing keys similarly to how it's done for keypairs - pub fn insert_viewing_key( + pub fn insert_viewing_key( &mut self, alias: Alias, viewkey: ExtendedViewingKey, @@ -463,7 +463,7 @@ impl Store { } /// Insert payment addresses similarly to how it's done for keypairs - pub fn insert_payment_addr( + pub fn insert_payment_addr( &mut self, alias: Alias, payment_addr: PaymentAddress, @@ -507,7 +507,7 @@ impl Store { /// will prompt for overwrite/reselection confirmation, which when declined, /// the address won't be added. Return the selected alias if the address has /// been added. - pub fn insert_address( + pub fn insert_address( &mut self, alias: Alias, address: Address, From f99eaa3e09e6b194a45c39961257c19fb23c303f Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Sun, 1 Oct 2023 03:42:32 +0200 Subject: [PATCH 053/161] Made the ShieldedUtils trait similar to the WalletStorage trait. 
--- apps/src/lib/cli/wallet.rs | 18 ++++---- apps/src/lib/client/utils.rs | 27 ++++++++++-- apps/src/lib/wallet/cli_utils.rs | 18 ++++---- apps/src/lib/wallet/mod.rs | 5 ++- apps/src/lib/wallet/store.rs | 3 +- benches/lib.rs | 15 +++---- shared/src/sdk/masp.rs | 36 +++++++++------- shared/src/sdk/signing.rs | 5 +-- shared/src/sdk/wallet/keys.rs | 4 +- shared/src/sdk/wallet/mod.rs | 70 +++++++++++++++++++------------- 10 files changed, 119 insertions(+), 82 deletions(-) diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 09d66096af..33b443edd9 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -403,16 +403,14 @@ fn key_and_address_gen( encryption_password, derivation_path_and_mnemonic_rng, ) - .unwrap_or_else(|err| { - match err { - GenRestoreKeyError::KeyStorageError => { - println!("No changes are persisted. Exiting."); - cli::safe_exit(0); - }, - _ => { - eprintln!("{}", err); - cli::safe_exit(1); - } + .unwrap_or_else(|err| match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. 
Exiting."); + cli::safe_exit(0); + } + _ => { + eprintln!("{}", err); + cli::safe_exit(1); } }); crate::wallet::save(&wallet) diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 7e243fd502..f508267db3 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -506,7 +506,14 @@ pub fn init_network( let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (_alias, keypair, _mnemonic) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) .expect("Key generation should not fail."); // Write consensus key for Tendermint @@ -525,7 +532,14 @@ pub fn init_network( let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (_alias, keypair, _mnemonic) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) .expect("Key generation should not fail."); keypair.ref_to() }); @@ -540,7 +554,14 @@ pub fn init_network( let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (_alias, keypair, _mnemonic) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, None, password, None) + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) .expect("Key generation should not fail."); keypair.ref_to() }); diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index fb288df193..ada1b16684 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -316,16 +316,14 @@ pub fn key_and_address_gen( encryption_password, derivation_path_and_mnemonic_rng, ) - .unwrap_or_else(|err| { - match err { - GenRestoreKeyError::KeyStorageError => { - println!("No changes are persisted. 
Exiting."); - cli::safe_exit(0); - }, - _ => { - eprintln!("{}", err); - cli::safe_exit(1); - } + .unwrap_or_else(|err| match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. Exiting."); + cli::safe_exit(0); + } + _ => { + eprintln!("{}", err); + cli::safe_exit(1); } }); crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 01967a5c24..cca43a43d6 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -66,7 +66,7 @@ impl WalletIo for CliWalletUtils { cli::safe_exit(1) }, ) - }, + } Err(_) => { let prompt = "Enter your decryption password: "; rpassword::read_password_from_tty(Some(prompt)) @@ -258,7 +258,8 @@ pub fn add_genesis_addresses( /// Save the wallet store to a file. pub fn save(wallet: &Wallet) -> std::io::Result<()> { - wallet.save() + wallet + .save() .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) } diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index 24c62e4866..c035925160 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -8,12 +8,11 @@ use ark_std::rand::SeedableRng; use namada::sdk::wallet::store::AddressVpType; #[cfg(feature = "dev")] use namada::sdk::wallet::StoredKeypair; -use namada::sdk::wallet::{gen_sk_rng, Store, ValidatorKeys}; +use namada::sdk::wallet::{gen_sk_rng, LoadStoreError, Store, ValidatorKeys}; #[cfg(not(feature = "dev"))] use namada::types::address::Address; use namada::types::key::*; use namada::types::transaction::EllipticCurve; -use namada::sdk::wallet::LoadStoreError; use crate::config::genesis::genesis_config::GenesisConfig; use crate::wallet::CliWalletUtils; diff --git a/benches/lib.rs b/benches/lib.rs index a816451477..5ea8b091ee 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -586,22 +586,23 @@ impl ShieldedUtils for BenchShieldedUtils { /// Try to load the last saved shielded context from the 
given context /// directory. If this fails, then leave the current context unchanged. - async fn load(self) -> std::io::Result> { + async fn load(&self, ctx: &mut ShieldedContext) -> std::io::Result<()> { // Try to load shielded context from file let mut ctx_file = File::open( self.context_dir.0.path().to_path_buf().join(FILE_NAME), )?; let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = ShieldedContext::deserialize(&mut &bytes[..])?; - // Associate the originating context directory with the - // shielded context under construction - new_ctx.utils = self; - Ok(new_ctx) + // Fill the supplied context with the deserialized object + *ctx = ShieldedContext { + utils: ctx.utils.clone(), + ..ShieldedContext::deserialize(&mut &bytes[..])? + }; + Ok(()) } /// Save this shielded context into its associated context directory - async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()> { + async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()> { let tmp_path = self.context_dir.0.path().to_path_buf().join(TMP_FILE_NAME); { diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 1cd9ae7e5b..7959916944 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -397,10 +397,16 @@ pub trait ShieldedUtils: fn local_tx_prover(&self) -> LocalTxProver; /// Load up the currently saved ShieldedContext - async fn load(self) -> std::io::Result>; + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()>; - /// Sace the given ShieldedContext for future loads - async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()>; + /// Save the given ShieldedContext for future loads + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()>; } /// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey @@ -621,9 +627,7 @@ impl ShieldedContext { /// Try to load the last saved shielded context from the given context /// directory. 
If this fails, then leave the current context unchanged. pub async fn load(&mut self) -> std::io::Result<()> { - let new_ctx = self.utils.clone().load().await?; - *self = new_ctx; - Ok(()) + self.utils.clone().load(self).await } /// Save this shielded context into its associated context directory @@ -2189,22 +2193,26 @@ pub mod fs { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. - async fn load(self) -> std::io::Result> { + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()> { // Try to load shielded context from file let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = ShieldedContext::deserialize(&mut &bytes[..])?; - // Associate the originating context directory with the - // shielded context under construction - new_ctx.utils = self; - Ok(new_ctx) + // Fill the supplied context with the deserialized object + *ctx = ShieldedContext { + utils: ctx.utils.clone(), + ..ShieldedContext::::deserialize(&mut &bytes[..])? + }; + Ok(()) } /// Save this shielded context into its associated context directory - async fn save( + async fn save( &self, - ctx: &ShieldedContext, + ctx: &ShieldedContext, ) -> std::io::Result<()> { // TODO: use mktemp crate? let tmp_path = self.context_dir.join(TMP_FILE_NAME); diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 24f3b12c7b..9d534217a4 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -83,10 +83,7 @@ pub struct SigningTxData { /// for it from the wallet. If the keypair is encrypted but a password is not /// supplied, then it is interactively prompted. Errors if the key cannot be /// found or loaded. 
-pub async fn find_pk< - C: crate::ledger::queries::Client + Sync, - U: WalletIo, ->( +pub async fn find_pk( client: &C, wallet: &mut Wallet, addr: &Address, diff --git a/shared/src/sdk/wallet/keys.rs b/shared/src/sdk/wallet/keys.rs index 6b10352c8b..749fa1e25f 100644 --- a/shared/src/sdk/wallet/keys.rs +++ b/shared/src/sdk/wallet/keys.rs @@ -174,8 +174,8 @@ where match self { StoredKeypair::Encrypted(encrypted_keypair) => { if decrypt { - let password = password - .unwrap_or_else(|| U::read_password(false)); + let password = + password.unwrap_or_else(|| U::read_password(false)); let key = encrypted_keypair.decrypt(password)?; Ok(key) } else { diff --git a/shared/src/sdk/wallet/mod.rs b/shared/src/sdk/wallet/mod.rs index 580b2db5a1..cadfdc718c 100644 --- a/shared/src/sdk/wallet/mod.rs +++ b/shared/src/sdk/wallet/mod.rs @@ -125,12 +125,14 @@ pub trait WalletStorage: Sized + Clone { #[cfg(feature = "std")] /// Implementation of wallet functionality depending on a standard filesystem pub mod fs { - use super::*; use std::fs; - use fd_lock::RwLock; + use std::io::{Read, Write}; use std::path::PathBuf; + + use fd_lock::RwLock; use rand_core::OsRng; - use std::io::{Read, Write}; + + use super::*; /// A trait for deriving WalletStorage for standard filesystems pub trait FsWalletStorage: Clone { @@ -147,30 +149,38 @@ pub mod fs { let wallet_path = self.store_dir().join(FILE_NAME); // Make sure the dir exists let wallet_dir = wallet_path.parent().unwrap(); - fs::create_dir_all(wallet_dir) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; + fs::create_dir_all(wallet_dir).map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?; // Write the file let mut options = fs::OpenOptions::new(); options.create(true).write(true).truncate(true); - let mut lock = RwLock::new( - options.open(wallet_path) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?, - ); - let mut guard = lock.write() - .map_err(|err| 
LoadStoreError::StoreNewWallet(err.to_string()))?; - guard.write_all(&data).map_err(|err| LoadStoreError::StoreNewWallet(err.to_string())) + let mut lock = + RwLock::new(options.open(wallet_path).map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?); + let mut guard = lock.write().map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?; + guard + .write_all(&data) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string())) } - fn load(&self, wallet: &mut Wallet) -> Result<(), LoadStoreError> { + fn load( + &self, + wallet: &mut Wallet, + ) -> Result<(), LoadStoreError> { let wallet_file = self.store_dir().join(FILE_NAME); let mut options = fs::OpenOptions::new(); options.read(true).write(false); - let lock = RwLock::new(options.open(&wallet_file).map_err(|err| { - LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - ) - })?); + let lock = + RwLock::new(options.open(&wallet_file).map_err(|err| { + LoadStoreError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + ) + })?); let guard = lock.read().map_err(|err| { LoadStoreError::ReadWallet( wallet_file.to_string_lossy().into_owned(), @@ -184,7 +194,8 @@ pub mod fs { err.to_string(), ) })?; - wallet.store = Store::decode(store).map_err(LoadStoreError::Decode)?; + wallet.store = + Store::decode(store).map_err(LoadStoreError::Decode)?; Ok(()) } } @@ -535,7 +546,8 @@ impl Wallet { passphrase: Option>, password: Option>, derivation_path_and_mnemonic_rng: Option<(String, &mut U::Rng)>, - ) -> Result<(String, common::SecretKey, Option), GenRestoreKeyError> { + ) -> Result<(String, common::SecretKey, Option), GenRestoreKeyError> + { let parsed_path_and_rng = derivation_path_and_mnemonic_rng .map(|(raw_derivation_path, rng)| { let is_default = @@ -580,13 +592,15 @@ impl Wallet { Ok((Seed::new(mnemonic, &passphrase), path)) }).transpose()?; - let (alias, key) = self.gen_and_store_key( - scheme, - alias, - alias_force, 
- seed_and_derivation_path, - password, - ).ok_or(GenRestoreKeyError::KeyStorageError)?; + let (alias, key) = self + .gen_and_store_key( + scheme, + alias, + alias_force, + seed_and_derivation_path, + password, + ) + .ok_or(GenRestoreKeyError::KeyStorageError)?; Ok((alias, key, mnemonic_opt)) } From 79f006aed09501f96a21a611e6693fb0a2a52937 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Sun, 1 Oct 2023 04:57:10 +0200 Subject: [PATCH 054/161] Added function to construct DenominatedAmounts from Amounts. --- benches/lib.rs | 10 ++++++++-- shared/src/sdk/args.rs | 6 ++++++ shared/src/sdk/rpc.rs | 24 +++++++++++++++++------- 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/benches/lib.rs b/benches/lib.rs index 5ea8b091ee..95c722de77 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -586,7 +586,10 @@ impl ShieldedUtils for BenchShieldedUtils { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. 
- async fn load(&self, ctx: &mut ShieldedContext) -> std::io::Result<()> { + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()> { // Try to load shielded context from file let mut ctx_file = File::open( self.context_dir.0.path().to_path_buf().join(FILE_NAME), @@ -602,7 +605,10 @@ impl ShieldedUtils for BenchShieldedUtils { } /// Save this shielded context into its associated context directory - async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()> { + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()> { let tmp_path = self.context_dir.0.path().to_path_buf().join(TMP_FILE_NAME); { diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 8a1d0d81b0..cf43d5dbd7 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -211,6 +211,12 @@ impl std::str::FromStr for InputAmount { } } +impl From for InputAmount { + fn from(amt: token::DenominatedAmount) -> Self { + InputAmount::Unvalidated(amt) + } +} + /// Transfer transaction arguments #[derive(Clone, Debug)] pub struct TxTransfer { diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index cb0e55835a..8a40d8a8dd 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -1058,15 +1058,13 @@ where .try_halt(|_| ()) } -/// Look up the denomination of a token in order to format it -/// correctly as a string. -pub async fn format_denominated_amount< - C: crate::ledger::queries::Client + Sync, ->( +/// Look up the denomination of a token in order to make a correctly denominated +/// amount. 
+pub async fn denominate_amount( client: &C, token: &Address, amount: token::Amount, -) -> String { +) -> DenominatedAmount { let denom = convert_response::>( RPC.vp().token().denomination(client, token).await, ) @@ -1082,5 +1080,17 @@ pub async fn format_denominated_amount< ); 0.into() }); - DenominatedAmount { amount, denom }.to_string() + DenominatedAmount { amount, denom } +} + +/// Look up the denomination of a token in order to format it +/// correctly as a string. +pub async fn format_denominated_amount< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + token: &Address, + amount: token::Amount, +) -> String { + denominate_amount(client, token, amount).await.to_string() } From c775ec720c94da3f119b2ea2457d819c1dcc8c38 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Sun, 1 Oct 2023 12:28:31 +0200 Subject: [PATCH 055/161] Removed unnecessary requirement for mutable references in Namada trait. --- shared/src/ledger/mod.rs | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index e952c99a08..59397be235 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -82,7 +82,7 @@ pub trait Namada<'a>: type ShieldedUtils: 'a + ShieldedUtils; /// Return the native token - fn native_token(&mut self) -> Address { + fn native_token(&self) -> Address { self.wallet .find_address(args::NAM) .expect("NAM not in wallet") @@ -90,7 +90,7 @@ pub trait Namada<'a>: } /// Make a tx builder using no arguments - fn tx_builder(&mut self) -> args::Tx { + fn tx_builder(&self) -> args::Tx { args::Tx { dry_run: false, dry_run_wrapper: false, @@ -119,7 +119,7 @@ pub trait Namada<'a>: /// Make a TxTransfer builder from the given minimum set of arguments fn new_transfer( - &mut self, + &self, source: TransferSource, target: TransferTarget, token: Address, @@ -138,7 +138,7 @@ pub trait Namada<'a>: /// Make a RevealPK builder from the given minimum set of 
arguments fn new_reveal_pk( - &mut self, + &self, public_key: common::PublicKey, ) -> args::RevealPk { args::RevealPk { @@ -149,7 +149,7 @@ pub trait Namada<'a>: /// Make a Bond builder from the given minimum set of arguments fn new_bond( - &mut self, + &self, validator: Address, amount: token::Amount, ) -> args::Bond { @@ -165,7 +165,7 @@ pub trait Namada<'a>: /// Make a Unbond builder from the given minimum set of arguments fn new_unbond( - &mut self, + &self, validator: Address, amount: token::Amount, ) -> args::Unbond { @@ -180,7 +180,7 @@ pub trait Namada<'a>: /// Make a TxIbcTransfer builder from the given minimum set of arguments fn new_ibc_transfer( - &mut self, + &self, source: Address, receiver: String, token: Address, @@ -204,7 +204,7 @@ pub trait Namada<'a>: /// Make a InitProposal builder from the given minimum set of arguments fn new_init_proposal( - &mut self, + &self, proposal_data: Vec, ) -> args::InitProposal { args::InitProposal { @@ -219,7 +219,7 @@ pub trait Namada<'a>: } /// Make a TxUpdateAccount builder from the given minimum set of arguments - fn new_update_account(&mut self, addr: Address) -> args::TxUpdateAccount { + fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { args::TxUpdateAccount { addr, vp_code_path: None, @@ -232,7 +232,7 @@ pub trait Namada<'a>: /// Make a VoteProposal builder from the given minimum set of arguments fn new_vote_prposal( - &mut self, + &self, vote: String, voter: Address, ) -> args::VoteProposal { @@ -250,7 +250,7 @@ pub trait Namada<'a>: /// Make a CommissionRateChange builder from the given minimum set of /// arguments fn new_change_commission_rate( - &mut self, + &self, rate: Dec, validator: Address, ) -> args::CommissionRateChange { @@ -264,7 +264,7 @@ pub trait Namada<'a>: /// Make a TxInitValidator builder from the given minimum set of arguments fn new_init_validator( - &mut self, + &self, commission_rate: Dec, max_commission_rate_change: Dec, ) -> args::TxInitValidator { @@ -287,7 
+287,7 @@ pub trait Namada<'a>: /// Make a TxUnjailValidator builder from the given minimum set of arguments fn new_unjail_validator( - &mut self, + &self, validator: Address, ) -> args::TxUnjailValidator { args::TxUnjailValidator { @@ -298,7 +298,7 @@ pub trait Namada<'a>: } /// Make a Withdraw builder from the given minimum set of arguments - fn new_withdraw(&mut self, validator: Address) -> args::Withdraw { + fn new_withdraw(&self, validator: Address) -> args::Withdraw { args::Withdraw { validator, source: None, @@ -309,7 +309,7 @@ pub trait Namada<'a>: /// Make a Withdraw builder from the given minimum set of arguments fn new_add_erc20_transfer( - &mut self, + &self, sender: Address, recipient: EthAddress, asset: EthAddress, @@ -333,7 +333,7 @@ pub trait Namada<'a>: } /// Make a ResignSteward builder from the given minimum set of arguments - fn new_resign_steward(&mut self, steward: Address) -> args::ResignSteward { + fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { args::ResignSteward { steward, tx: self.tx_builder(), @@ -344,7 +344,7 @@ pub trait Namada<'a>: /// Make a UpdateStewardCommission builder from the given minimum set of /// arguments fn new_update_steward_rewards( - &mut self, + &self, steward: Address, commission: Vec, ) -> args::UpdateStewardCommission { @@ -357,7 +357,7 @@ pub trait Namada<'a>: } /// Make a TxCustom builder from the given minimum set of arguments - fn new_custom(&mut self, owner: Address) -> args::TxCustom { + fn new_custom(&self, owner: Address) -> args::TxCustom { args::TxCustom { owner, tx: self.tx_builder(), @@ -483,7 +483,7 @@ where type WalletUtils = U; /// Obtain the prototypical Tx builder - fn tx_builder(&mut self) -> args::Tx { + fn tx_builder(&self) -> args::Tx { self.prototype.clone() } } From 9f06a4bd546352839c4cde833f0debac5adcd647 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Sun, 1 Oct 2023 15:09:48 +0200 Subject: [PATCH 056/161] Enabled parallel usage of the Namada trait using read 
and write locks. --- apps/src/lib/cli/client.rs | 12 +- apps/src/lib/client/tx.rs | 202 ++++++++---------- benches/lib.rs | 5 +- shared/Cargo.toml | 1 + shared/src/ledger/eth_bridge/bridge_pool.rs | 27 ++- shared/src/ledger/mod.rs | 217 +++++++++++--------- shared/src/sdk/args.rs | 56 +++-- shared/src/sdk/masp.rs | 60 +++--- shared/src/sdk/signing.rs | 78 ++++--- shared/src/sdk/tx.rs | 136 ++++++------ 10 files changed, 412 insertions(+), 382 deletions(-) diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 5af2aaa2b2..449d4f38ce 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -258,28 +258,30 @@ impl CliApi { let args = args.to_sdk(&mut ctx); let tx_args = args.tx.clone(); - let mut namada = NamadaImpl::new( + let namada = NamadaImpl::new( &client, &mut ctx.wallet, &mut ctx.shielded, ); let (mut tx, signing_data, _epoch) = - args.clone().build(&mut namada).await?; + args.clone().build(&namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { dump_tx::(&args.tx, tx); } else { tx::submit_reveal_aux( - &mut namada, + &namada, tx_args.clone(), &args.sender, ) .await?; - namada.sign(&mut tx, &tx_args, signing_data)?; + namada + .sign(&mut tx, &tx_args, signing_data) + .await?; namada.submit(tx, &tx_args).await?; } diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 2928ce755f..c64fdf0044 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -36,7 +36,7 @@ use namada::types::io::StdIo; /// Wrapper around `signing::aux_signing_data` that stores the optional /// disposable address to the wallet pub async fn aux_signing_data<'a>( - context: &mut impl Namada<'a, WalletUtils = CliWalletUtils>, + context: &impl Namada<'a, WalletUtils = CliWalletUtils>, args: &args::Tx, owner: Option
, default_signer: Option
, @@ -47,7 +47,7 @@ pub async fn aux_signing_data<'a>( if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { // Store the generated signing key to wallet in case of need - crate::wallet::save(context.wallet).map_err(|_| { + crate::wallet::save(*context.wallet().await).map_err(|_| { error::Error::Other( "Failed to save disposable address to wallet".to_string(), ) @@ -66,7 +66,7 @@ pub async fn aux_signing_data<'a>( // Build a transaction to reveal the signer of the given transaction. pub async fn submit_reveal_aux<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: args::Tx, address: &Address, ) -> Result<(), error::Error> { @@ -76,12 +76,15 @@ pub async fn submit_reveal_aux<'a>( if let Address::Implicit(ImplicitAddress(pkh)) = address { let key = context - .wallet + .wallet_mut() + .await .find_key_by_pkh(pkh, args.clone().password) .map_err(|e| error::Error::Other(e.to_string()))?; let public_key = key.ref_to(); - if tx::is_reveal_pk_needed(context.client, address, args.force).await? { + if tx::is_reveal_pk_needed(context.client(), address, args.force) + .await? + { println!( "Submitting a tx to reveal the public key for address \ {address}..." 
@@ -91,7 +94,7 @@ pub async fn submit_reveal_aux<'a>( signing::generate_test_vector(context, &tx).await?; - context.sign(&mut tx, &args, signing_data)?; + context.sign(&mut tx, &args, signing_data).await?; context.submit(tx, &args).await?; } @@ -109,18 +112,17 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&mut namada, args.tx.clone(), &args.owner).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&namada, args.tx.clone(), &args.owner).await?; - let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; + let (mut tx, signing_data, _epoch) = args.build(&namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -136,16 +138,15 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _epoch) = args.build(&namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -161,17 +162,16 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada 
= NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _epoch) = - tx::build_init_account(&mut namada, &args).await?; + tx::build_init_account(&namada, &args).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -384,13 +384,11 @@ where tx.add_code_from_hash(tx_code_hash).add_data(data); - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let signing_data = - aux_signing_data(&mut namada, &tx_args, None, None).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let signing_data = aux_signing_data(&namada, &tx_args, None, None).await?; tx::prepare_tx( - &mut namada, + &namada, &tx_args, &mut tx, signing_data.fee_payer.clone(), @@ -398,12 +396,12 @@ where ) .await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if tx_args.dump_tx { tx::dump_tx::(&tx_args, tx); } else { - namada.sign(&mut tx, &tx_args, signing_data)?; + namada.sign(&mut tx, &tx_args, signing_data).await?; let result = namada.submit(tx, &tx_args).await?.initialized_accounts(); @@ -502,25 +500,25 @@ pub async fn submit_transfer( args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { - let mut namada = + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); submit_reveal_aux( - &mut namada, + &namada, args.tx.clone(), &args.source.effective_address(), ) .await?; let (mut tx, signing_data, tx_epoch) = - args.clone().build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.clone().build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); 
break; } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; let result = namada.submit(tx, &args.tx).await?; let submission_epoch = @@ -561,16 +559,15 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&mut namada, args.tx.clone(), &args.source).await?; - let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&namada, args.tx.clone(), &args.source).await?; + let (mut tx, signing_data, _epoch) = args.build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -588,8 +585,7 @@ where { let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; let governance_parameters = rpc::query_governance_parameters(client).await; - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) @@ -603,7 +599,7 @@ where let default_signer = Some(proposal.author.clone()); let signing_data = aux_signing_data( - &mut namada, + &namada, &args.tx, Some(proposal.author.clone()), default_signer, @@ -635,14 +631,10 @@ where .validate(&governance_parameters, current_epoch, args.tx.force) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux( - &mut namada, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + 
submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + .await?; - tx::build_pgf_funding_proposal(&mut namada, &args, proposal).await? + tx::build_pgf_funding_proposal(&namada, &args, proposal).await? } else if args.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( args.proposal_data.as_ref(), @@ -665,14 +657,10 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux( - &mut namada, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + .await?; - tx::build_pgf_stewards_proposal(&mut namada, &args, proposal).await? + tx::build_pgf_stewards_proposal(&namada, &args, proposal).await? } else { let proposal = DefaultProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -693,21 +681,17 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux( - &mut namada, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + .await?; - tx::build_default_proposal(&mut namada, &args, proposal).await? + tx::build_default_proposal(&namada, &args, proposal).await? 
}; - signing::generate_test_vector(&mut namada, &tx_builder).await?; + signing::generate_test_vector(&namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx_builder); } else { - namada.sign(&mut tx_builder, &args.tx, signing_data)?; + namada.sign(&mut tx_builder, &args.tx, signing_data).await?; namada.submit(tx_builder, &args.tx).await?; } @@ -723,13 +707,12 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let default_signer = Some(args.voter.clone()); let signing_data = aux_signing_data( - &mut namada, + &namada, &args.tx, Some(args.voter.clone()), default_signer.clone(), @@ -777,14 +760,14 @@ where display_line!(IO, "Proposal vote serialized to: {}", output_file_path); return Ok(()); } else { - args.build(&mut namada).await? + args.build(&namada).await? 
}; - signing::generate_test_vector(&mut namada, &tx_builder).await?; + signing::generate_test_vector(&namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx_builder); } else { - namada.sign(&mut tx_builder, &args.tx, signing_data)?; + namada.sign(&mut tx_builder, &args.tx, signing_data).await?; namada.submit(tx_builder, &args.tx).await?; } @@ -810,11 +793,10 @@ where edisplay_line!(IO, "Couldn't decode the transaction."); safe_exit(1) }; - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_signer = Some(owner.clone()); let signing_data = aux_signing_data( - &mut namada, + &namada, &tx_args, Some(owner.clone()), default_signer, @@ -888,9 +870,8 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&mut namada, args.tx, &(&args.public_key).into()).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + submit_reveal_aux(&namada, args.tx, &(&args.public_key).into()).await?; Ok(()) } @@ -904,19 +885,18 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_address = args.source.clone().unwrap_or(args.validator.clone()); - submit_reveal_aux(&mut namada, args.tx.clone(), &default_address).await?; + submit_reveal_aux(&namada, args.tx.clone(), &default_address).await?; let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, 
signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -933,16 +913,15 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = - args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -962,16 +941,15 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -987,16 +965,15 @@ pub async fn submit_validator_commission_change( where C: namada::ledger::queries::Client + Sync, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.build(&namada).await?; + signing::generate_test_vector(&namada, 
&tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -1015,16 +992,15 @@ pub async fn submit_unjail_validator< where C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&mut namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + args.build(&namada).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -1044,17 +1020,16 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&mut namada).await?; + args.build(&namada).await?; - signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } @@ -1070,16 +1045,15 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let mut namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx, signing_data, _epoch) = args.build(&mut namada).await?; + let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let (mut tx, signing_data, _epoch) = args.build(&namada).await?; - 
signing::generate_test_vector(&mut namada, &tx).await?; + signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx::(&args.tx, tx); } else { - namada.sign(&mut tx, &args.tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; } diff --git a/benches/lib.rs b/benches/lib.rs index 95c722de77..3aacaef90f 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -811,13 +811,12 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let mut namada = + let namada = NamadaImpl::new(&self.shell, &mut self.wallet, &mut self.shielded); let shielded = async_runtime .block_on( ShieldedContext::::gen_shielded_transfer( - &mut namada, - &args, + &namada, &args, ), ) .unwrap() diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 660a810087..f3c5428594 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -150,6 +150,7 @@ zeroize.workspace = true tokio = {workspace = true, features = ["full"]} [target.'cfg(target_family = "wasm")'.dependencies] +tokio = {workspace = true, default-features = false, features = ["sync"]} wasmtimer = "0.2.0" [dev-dependencies] diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index 430537fdc2..14002b53d3 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -43,7 +43,7 @@ use crate::{display, display_line}; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. pub async fn build_bridge_pool_tx<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::EthereumBridgePool { tx: tx_args, nut, @@ -67,7 +67,7 @@ pub async fn build_bridge_pool_tx<'a>( .await?; let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let DenominatedAmount { amount, .. 
} = validate_amount( - context.client, + context.client(), amount, &wrapped_erc20s::token(&asset), tx_args.force, @@ -76,14 +76,19 @@ pub async fn build_bridge_pool_tx<'a>( .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; let DenominatedAmount { amount: fee_amount, .. - } = validate_amount(context.client, fee_amount, &fee_token, tx_args.force) - .await - .map_err(|e| { - Error::Other(format!( - "Failed to validate Bridge pool fee amount. {}", - e - )) - })?; + } = validate_amount( + context.client(), + fee_amount, + &fee_token, + tx_args.force, + ) + .await + .map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool fee amount. {}", + e + )) + })?; let transfer = PendingTransfer { transfer: TransferToEthereum { asset, @@ -104,7 +109,7 @@ pub async fn build_bridge_pool_tx<'a>( }; let tx_code_hash = - query_wasm_code_hash(context.client, code_path.to_str().unwrap()) + query_wasm_code_hash(context.client(), code_path.to_str().unwrap()) .await .unwrap(); diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 59397be235..a8159834a5 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -14,9 +14,9 @@ pub mod queries; pub mod storage; pub mod vp_host_fns; -use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::str::FromStr; +use std::sync::Arc; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, @@ -45,34 +45,11 @@ use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::EthAddress; - -/// Encapsulates a Namada session to enable splitting borrows of its parts -pub struct NamadaStruct<'a, C, U, V> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, -{ - /// Used to send and receive messages from the ledger - pub client: &'a C, - /// Stores the addresses and keys required for 
ledger interactions - pub wallet: &'a mut Wallet, - /// Stores the current state of the shielded pool - pub shielded: &'a mut ShieldedContext, -} +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; #[async_trait::async_trait(?Send)] /// An interface for high-level interaction with the Namada SDK -pub trait Namada<'a>: - DerefMut< - Target = NamadaStruct< - 'a, - Self::Client, - Self::WalletUtils, - Self::ShieldedUtils, - >, -> -{ +pub trait Namada<'a> { /// A client with async request dispatcher method type Client: 'a + crate::ledger::queries::Client + Sync; /// Captures the interactive parts of the wallet's functioning @@ -81,16 +58,40 @@ pub trait Namada<'a>: /// operations. type ShieldedUtils: 'a + ShieldedUtils; + /// Obtain the client for communicating with the ledger + fn client(&self) -> &'a Self::Client; + + /// Obtain read lock on the wallet + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet>; + + /// Obtain write lock on the wallet + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet>; + + /// Obtain read lock on the shielded context + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext>; + + /// Obtain write lock on the shielded context + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext>; + /// Return the native token - fn native_token(&self) -> Address { - self.wallet + async fn native_token(&self) -> Address { + self.wallet() + .await .find_address(args::NAM) .expect("NAM not in wallet") .clone() } /// Make a tx builder using no arguments - fn tx_builder(&self) -> args::Tx { + async fn tx_builder(&self) -> args::Tx { args::Tx { dry_run: false, dry_run_wrapper: false, @@ -103,7 +104,7 @@ pub trait Namada<'a>: wallet_alias_force: false, fee_amount: None, wrapper_fee_payer: None, - fee_token: self.native_token(), + fee_token: self.native_token().await, fee_unshield: None, gas_limit: GasLimit::from(20_000), expiration: None, @@ -118,7 +119,7 @@ pub trait 
Namada<'a>: } /// Make a TxTransfer builder from the given minimum set of arguments - fn new_transfer( + async fn new_transfer( &self, source: TransferSource, target: TransferTarget, @@ -131,24 +132,24 @@ pub trait Namada<'a>: token, amount, tx_code_path: PathBuf::from(TX_TRANSFER_WASM), - tx: self.tx_builder(), - native_token: self.native_token(), + tx: self.tx_builder().await, + native_token: self.native_token().await, } } /// Make a RevealPK builder from the given minimum set of arguments - fn new_reveal_pk( + async fn new_reveal_pk( &self, public_key: common::PublicKey, ) -> args::RevealPk { args::RevealPk { public_key, - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a Bond builder from the given minimum set of arguments - fn new_bond( + async fn new_bond( &self, validator: Address, amount: token::Amount, @@ -157,14 +158,14 @@ pub trait Namada<'a>: validator, amount, source: None, - tx: self.tx_builder(), - native_token: self.native_token(), + tx: self.tx_builder().await, + native_token: self.native_token().await, tx_code_path: PathBuf::from(TX_BOND_WASM), } } /// Make a Unbond builder from the given minimum set of arguments - fn new_unbond( + async fn new_unbond( &self, validator: Address, amount: token::Amount, @@ -173,13 +174,13 @@ pub trait Namada<'a>: validator, amount, source: None, - tx: self.tx_builder(), + tx: self.tx_builder().await, tx_code_path: PathBuf::from(TX_UNBOND_WASM), } } /// Make a TxIbcTransfer builder from the given minimum set of arguments - fn new_ibc_transfer( + async fn new_ibc_transfer( &self, source: Address, receiver: String, @@ -197,41 +198,41 @@ pub trait Namada<'a>: timeout_height: None, timeout_sec_offset: None, memo: None, - tx: self.tx_builder(), + tx: self.tx_builder().await, tx_code_path: PathBuf::from(TX_IBC_WASM), } } /// Make a InitProposal builder from the given minimum set of arguments - fn new_init_proposal( + async fn new_init_proposal( &self, proposal_data: Vec, ) -> args::InitProposal { 
args::InitProposal { proposal_data, - native_token: self.native_token(), + native_token: self.native_token().await, is_offline: false, is_pgf_stewards: false, is_pgf_funding: false, tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a TxUpdateAccount builder from the given minimum set of arguments - fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { + async fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { args::TxUpdateAccount { addr, vp_code_path: None, public_keys: vec![], threshold: None, tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a VoteProposal builder from the given minimum set of arguments - fn new_vote_prposal( + async fn new_vote_prposal( &self, vote: String, voter: Address, @@ -243,13 +244,13 @@ pub trait Namada<'a>: is_offline: false, proposal_data: None, tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a CommissionRateChange builder from the given minimum set of /// arguments - fn new_change_commission_rate( + async fn new_change_commission_rate( &self, rate: Dec, validator: Address, @@ -258,12 +259,12 @@ pub trait Namada<'a>: rate, validator, tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a TxInitValidator builder from the given minimum set of arguments - fn new_init_validator( + async fn new_init_validator( &self, commission_rate: Dec, max_commission_rate_change: Dec, @@ -281,34 +282,34 @@ pub trait Namada<'a>: validator_vp_code_path: PathBuf::from(VP_USER_WASM), unsafe_dont_encrypt: false, tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a TxUnjailValidator builder from the given minimum set of arguments - fn new_unjail_validator( + async fn 
new_unjail_validator( &self, validator: Address, ) -> args::TxUnjailValidator { args::TxUnjailValidator { validator, tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a Withdraw builder from the given minimum set of arguments - fn new_withdraw(&self, validator: Address) -> args::Withdraw { + async fn new_withdraw(&self, validator: Address) -> args::Withdraw { args::Withdraw { validator, source: None, tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a Withdraw builder from the given minimum set of arguments - fn new_add_erc20_transfer( + async fn new_add_erc20_transfer( &self, sender: Address, recipient: EthAddress, @@ -325,25 +326,28 @@ pub trait Namada<'a>: denom: NATIVE_MAX_DECIMAL_PLACES.into(), }), fee_payer: None, - fee_token: self.native_token(), + fee_token: self.native_token().await, nut: false, code_path: PathBuf::from(TX_BRIDGE_POOL_WASM), - tx: self.tx_builder(), + tx: self.tx_builder().await, } } /// Make a ResignSteward builder from the given minimum set of arguments - fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { + async fn new_resign_steward( + &self, + steward: Address, + ) -> args::ResignSteward { args::ResignSteward { steward, - tx: self.tx_builder(), + tx: self.tx_builder().await, tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), } } /// Make a UpdateStewardCommission builder from the given minimum set of /// arguments - fn new_update_steward_rewards( + async fn new_update_steward_rewards( &self, steward: Address, commission: Vec, @@ -351,16 +355,16 @@ pub trait Namada<'a>: args::UpdateStewardCommission { steward, commission, - tx: self.tx_builder(), + tx: self.tx_builder().await, tx_code_path: PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), } } /// Make a TxCustom builder from the given minimum set of arguments - fn new_custom(&self, owner: Address) -> args::TxCustom { + async fn 
new_custom(&self, owner: Address) -> args::TxCustom { args::TxCustom { owner, - tx: self.tx_builder(), + tx: self.tx_builder().await, code_path: None, data_path: None, serialized_tx: None, @@ -368,22 +372,22 @@ pub trait Namada<'a>: } /// Sign the given transaction using the given signing data - fn sign( - &mut self, + async fn sign( + &self, tx: &mut Tx, args: &args::Tx, signing_data: SigningTxData, ) -> crate::sdk::error::Result<()> { - signing::sign_tx(self.wallet, args, tx, signing_data) + signing::sign_tx(*self.wallet_mut().await, args, tx, signing_data) } /// Process the given transaction using the given flags async fn submit( - &mut self, + &self, tx: Tx, args: &args::Tx, ) -> crate::sdk::error::Result { - tx::process_tx(self.client, self.wallet, args, tx).await + tx::process_tx(self.client(), *self.wallet_mut().await, args, tx).await } } @@ -394,7 +398,13 @@ where U: WalletIo, V: ShieldedUtils, { - namada: NamadaStruct<'a, C, U, V>, + /// Used to send and receive messages from the ledger + pub client: &'a C, + /// Stores the addresses and keys required for ledger interactions + pub wallet: Arc>>, + /// Stores the current state of the shielded pool + pub shielded: Arc>>, + /// The default builder for a Tx prototype: args::Tx, } @@ -415,11 +425,9 @@ where .expect("NAM not in wallet") .clone(); Self { - namada: NamadaStruct { - client, - wallet, - shielded, - }, + client, + wallet: Arc::new(RwLock::new(wallet)), + shielded: Arc::new(RwLock::new(shielded)), prototype: args::Tx { dry_run: false, dry_run_wrapper: false, @@ -448,30 +456,7 @@ where } } -impl<'a, C, U, V> Deref for NamadaImpl<'a, C, U, V> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, -{ - type Target = NamadaStruct<'a, C, U, V>; - - fn deref(&self) -> &Self::Target { - &self.namada - } -} - -impl<'a, C, U, V> DerefMut for NamadaImpl<'a, C, U, V> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, -{ - fn deref_mut(&mut self) 
-> &mut Self::Target { - &mut self.namada - } -} - +#[async_trait::async_trait(?Send)] impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> where C: crate::ledger::queries::Client + Sync, @@ -483,9 +468,37 @@ where type WalletUtils = U; /// Obtain the prototypical Tx builder - fn tx_builder(&self) -> args::Tx { + async fn tx_builder(&self) -> args::Tx { self.prototype.clone() } + + fn client(&self) -> &'a Self::Client { + self.client + } + + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet> { + self.wallet.read().await + } + + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet> { + self.wallet.write().await + } + + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext> { + self.shielded.read().await + } + + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext> { + self.shielded.write().await + } } /// Allow the prototypical Tx builder to be modified diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index cf43d5dbd7..91e00dee68 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -181,7 +181,7 @@ impl TxCustom { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -290,7 +290,7 @@ impl TxTransfer { /// Build a transaction from this builder pub async fn build<'a>( &mut self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -407,7 +407,7 @@ impl TxIbcTransfer { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -499,15 +499,15 @@ impl InitProposal { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl 
Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, Option, )> { - let current_epoch = rpc::query_epoch(context.client).await?; + let current_epoch = rpc::query_epoch(context.client()).await?; let governance_parameters = - rpc::query_governance_parameters(context.client).await; + rpc::query_governance_parameters(context.client()).await; if self.is_pgf_funding { let proposal = @@ -528,9 +528,15 @@ impl InitProposal { .map_err(|e| { crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; + let nam_address = context + .wallet() + .await + .find_address(NAM) + .expect("NAM not in wallet") + .clone(); let author_balance = rpc::get_token_balance( - context.client, - context.wallet.find_address(NAM).expect("NAM not in wallet"), + context.client(), + &nam_address, &proposal.proposal.author, ) .await?; @@ -551,9 +557,15 @@ impl InitProposal { .map_err(|e| { crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; + let nam_address = context + .wallet() + .await + .find_address(NAM) + .expect("NAM not in wallet") + .clone(); let author_balance = rpc::get_token_balance( - context.client, - context.wallet.find_address(NAM).expect("NAM not in wallet"), + context.client(), + &nam_address, &proposal.proposal.author, ) .await?; @@ -648,13 +660,13 @@ impl VoteProposal { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, Option, )> { - let current_epoch = rpc::query_epoch(context.client).await?; + let current_epoch = rpc::query_epoch(context.client()).await?; tx::build_vote_proposal(context, self, current_epoch).await } } @@ -777,7 +789,7 @@ impl TxUpdateAccount { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, 
SigningTxData, @@ -858,7 +870,7 @@ impl Bond { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -888,7 +900,7 @@ impl Unbond { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -972,7 +984,7 @@ impl RevealPk { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1059,7 +1071,7 @@ impl Withdraw { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1195,7 +1207,7 @@ impl CommissionRateChange { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1254,7 +1266,7 @@ impl UpdateStewardCommission { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1306,7 +1318,7 @@ impl ResignSteward { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1358,7 +1370,7 @@ impl TxUnjailValidator { /// Build a transaction from this builder pub async fn build<'a>( &self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, @@ -1883,7 +1895,7 @@ impl 
EthereumBridgePool { /// Build a transaction from this builder pub async fn build<'a>( self, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, ) -> crate::sdk::error::Result<( crate::proto::Tx, SigningTxData, diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 7959916944..35c4f59956 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -1489,7 +1489,7 @@ impl ShieldedContext { /// amounts and signatures specified by the containing Transfer object. #[cfg(feature = "masp-tx-gen")] pub async fn gen_shielded_transfer<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::TxTransfer, ) -> Result, TransferErr> { // No shielded components are needed when neither source nor destination @@ -1510,17 +1510,20 @@ impl ShieldedContext { // We want to fund our transaction solely from supplied spending key let spending_key = spending_key.map(|x| x.into()); let spending_keys: Vec<_> = spending_key.into_iter().collect(); - // Load the current shielded context given the spending key we possess - let _ = context.shielded.load().await; - let context = &mut **context; - context - .shielded - .fetch(context.client, &spending_keys, &[]) - .await?; - // Save the update state so that future fetches can be short-circuited - let _ = context.shielded.save().await; + { + // Load the current shielded context given the spending key we + // possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + shielded + .fetch(context.client(), &spending_keys, &[]) + .await?; + // Save the update state so that future fetches can be + // short-circuited + let _ = shielded.save().await; + } // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_epoch(context.client).await?; + let epoch = rpc::query_epoch(context.client()).await?; // Context required for storing which notes are in the source's // possesion let memo = MemoBytes::empty(); @@ -1561,9 +1564,10 @@ impl 
ShieldedContext { if let Some(sk) = spending_key { // Locate unspent notes that can help us meet the transaction amount let (_, unspent_notes, used_convs) = context - .shielded + .shielded_mut() + .await .collect_unspent_notes( - context.client, + context.client(), &to_viewing_key(&sk).vk, I128Sum::from_sum(amount), epoch, @@ -1749,17 +1753,17 @@ impl ShieldedContext { Error::from(EncodingError::Conversion(e.to_string())) })?; - let build_transfer = - || -> Result> { - let (masp_tx, metadata) = builder.build( - &context.shielded.utils.local_tx_prover(), - &FeeRule::non_standard(U64Sum::zero()), - )?; - Ok(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, + let build_transfer = |prover: LocalTxProver| -> Result< + ShieldedTransfer, + builder::Error, + > { + let (masp_tx, metadata) = builder + .build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; + Ok(ShieldedTransfer { + builder: builder_clone, + masp_tx, + metadata, + epoch, }) }; @@ -1805,7 +1809,9 @@ impl ShieldedContext { Ok(Some(loaded)) } else { // Build and return the constructed transaction - let built = build_transfer()?; + let built = build_transfer( + context.shielded().await.utils.local_tx_prover(), + )?; if let LoadOrSaveProofs::Save = load_or_save { let built_bytes = BorshSerialize::try_to_vec(&built) .map_err(|e| { @@ -1824,7 +1830,9 @@ impl ShieldedContext { #[cfg(not(feature = "testing"))] { // Build and return the constructed transaction - let built = build_transfer()?; + let built = build_transfer( + context.shielded().await.utils.local_tx_prover(), + )?; Ok(Some(built)) } } diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 9d534217a4..048d72517f 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -151,7 +151,7 @@ pub fn find_key_by_pk( /// possible. If no explicit signer given, use the `default`. If no `default` /// is given, an `Error` is returned. 
pub async fn tx_signers<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::Tx, default: Option
, ) -> Result, Error> { @@ -172,8 +172,8 @@ pub async fn tx_signers<'a>( Some(signer) => Ok(vec![ find_pk( - context.client, - context.wallet, + context.client(), + *context.wallet_mut().await, &signer, args.password.clone(), ) @@ -240,7 +240,7 @@ pub fn sign_tx( /// Return the necessary data regarding an account to be able to generate a /// multisignature section pub async fn aux_signing_data<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::Tx, owner: Option
, default_signer: Option
, @@ -253,7 +253,8 @@ pub async fn aux_signing_data<'a>( let (account_public_keys_map, threshold) = match &owner { Some(owner @ Address::Established(_)) => { - let account = rpc::get_account_info(context.client, owner).await?; + let account = + rpc::get_account_info(context.client(), owner).await?; if let Some(account) = account { (Some(account.public_keys_map), account.threshold) } else { @@ -273,7 +274,11 @@ pub async fn aux_signing_data<'a>( }; let fee_payer = if args.disposable_signing_key { - context.wallet.generate_disposable_signing_key().to_public() + context + .wallet_mut() + .await + .generate_disposable_signing_key() + .to_public() } else { match &args.wrapper_fee_payer { Some(keypair) => keypair.to_public(), @@ -314,7 +319,7 @@ pub struct TxSourcePostBalance { /// progress on chain. #[allow(clippy::too_many_arguments)] pub async fn wrap_tx<'a, N: Namada<'a>>( - context: &mut N, + context: &N, tx: &mut Tx, args: &args::Tx, tx_source_balance: Option, @@ -327,7 +332,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( let minimum_fee = match rpc::query_storage_value::< _, BTreeMap, - >(context.client, &gas_cost_key) + >(context.client(), &gas_cost_key) .await .and_then(|map| { map.get(&args.fee_token) @@ -351,7 +356,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( let fee_amount = match args.fee_amount { Some(amount) => { let validated_fee_amount = validate_amount( - context.client, + context.client(), amount, &args.fee_token, args.force, @@ -392,7 +397,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( token::balance_key(&args.fee_token, &fee_payer_address); rpc::query_storage_value::<_, token::Amount>( - context.client, + context.client(), &balance_key, ) .await @@ -460,7 +465,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( let descriptions_limit_key= parameter_storage::get_fee_unshielding_descriptions_limit_key(); let descriptions_limit = rpc::query_storage_value::<_, u64>( - context.client, + context.client(), &descriptions_limit_key, ) .await @@ -508,14 +513,14 @@ pub 
async fn wrap_tx<'a, N: Namada<'a>>( let token_addr = args.fee_token.clone(); if !args.force { let fee_amount = format_denominated_amount( - context.client, + context.client(), &token_addr, total_fee, ) .await; let balance = format_denominated_amount( - context.client, + context.client(), &token_addr, updated_balance, ) @@ -779,7 +784,7 @@ pub async fn make_ledger_masp_endpoints< /// Internal method used to generate transaction test vectors #[cfg(feature = "std")] pub async fn generate_test_vector<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, tx: &Tx, ) -> Result<(), Error> { use std::env; @@ -830,40 +835,45 @@ pub async fn generate_test_vector<'a>( /// Converts the given transaction to the form that is displayed on the Ledger /// device pub async fn to_ledger_vector<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, tx: &Tx, ) -> Result { let init_account_hash = - query_wasm_code_hash(context.client, TX_INIT_ACCOUNT_WASM).await?; + query_wasm_code_hash(context.client(), TX_INIT_ACCOUNT_WASM).await?; let init_validator_hash = - query_wasm_code_hash(context.client, TX_INIT_VALIDATOR_WASM).await?; + query_wasm_code_hash(context.client(), TX_INIT_VALIDATOR_WASM).await?; let init_proposal_hash = - query_wasm_code_hash(context.client, TX_INIT_PROPOSAL).await?; + query_wasm_code_hash(context.client(), TX_INIT_PROPOSAL).await?; let vote_proposal_hash = - query_wasm_code_hash(context.client, TX_VOTE_PROPOSAL).await?; + query_wasm_code_hash(context.client(), TX_VOTE_PROPOSAL).await?; let reveal_pk_hash = - query_wasm_code_hash(context.client, TX_REVEAL_PK).await?; + query_wasm_code_hash(context.client(), TX_REVEAL_PK).await?; let update_account_hash = - query_wasm_code_hash(context.client, TX_UPDATE_ACCOUNT_WASM).await?; + query_wasm_code_hash(context.client(), TX_UPDATE_ACCOUNT_WASM).await?; let transfer_hash = - query_wasm_code_hash(context.client, TX_TRANSFER_WASM).await?; - let ibc_hash = query_wasm_code_hash(context.client, 
TX_IBC_WASM).await?; - let bond_hash = query_wasm_code_hash(context.client, TX_BOND_WASM).await?; + query_wasm_code_hash(context.client(), TX_TRANSFER_WASM).await?; + let ibc_hash = query_wasm_code_hash(context.client(), TX_IBC_WASM).await?; + let bond_hash = + query_wasm_code_hash(context.client(), TX_BOND_WASM).await?; let unbond_hash = - query_wasm_code_hash(context.client, TX_UNBOND_WASM).await?; + query_wasm_code_hash(context.client(), TX_UNBOND_WASM).await?; let withdraw_hash = - query_wasm_code_hash(context.client, TX_WITHDRAW_WASM).await?; + query_wasm_code_hash(context.client(), TX_WITHDRAW_WASM).await?; let change_commission_hash = - query_wasm_code_hash(context.client, TX_CHANGE_COMMISSION_WASM).await?; - let user_hash = query_wasm_code_hash(context.client, VP_USER_WASM).await?; + query_wasm_code_hash(context.client(), TX_CHANGE_COMMISSION_WASM) + .await?; + let user_hash = + query_wasm_code_hash(context.client(), VP_USER_WASM).await?; // To facilitate lookups of human-readable token names + let wallet = context.wallet().await; let tokens: HashMap = context - .wallet + .wallet() + .await .get_addresses_with_vp_type(AddressVpType::Token) .into_iter() .map(|addr| { - let alias = match context.wallet.find_alias(&addr) { + let alias = match wallet.find_alias(&addr) { Some(alias) => alias.to_string(), None => addr.to_string(), }; @@ -1150,7 +1160,7 @@ pub async fn to_ledger_vector<'a>( tv.output.push("Type : Transfer".to_string()); make_ledger_masp_endpoints( - context.client, + context.client(), &tokens, &mut tv.output, &transfer, @@ -1159,7 +1169,7 @@ pub async fn to_ledger_vector<'a>( ) .await; make_ledger_masp_endpoints( - context.client, + context.client(), &tokens, &mut tv.output_expert, &transfer, @@ -1332,13 +1342,13 @@ pub async fn to_ledger_vector<'a>( if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); let gas_limit = format_denominated_amount( - context.client, + context.client(), &gas_token, 
Amount::from(wrapper.gas_limit), ) .await; let fee_amount_per_gas_unit = format_denominated_amount( - context.client, + context.client(), &gas_token, wrapper.fee.amount_per_gas_unit, ) diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index f718084709..e6272c0d65 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -160,14 +160,14 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { /// to it. #[allow(clippy::too_many_arguments)] pub async fn prepare_tx<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::Tx, tx: &mut Tx, fee_payer: common::PublicKey, tx_source_balance: Option, ) -> Result> { if !args.dry_run { - let epoch = rpc::query_epoch(context.client).await?; + let epoch = rpc::query_epoch(context.client()).await?; signing::wrap_tx( context, @@ -268,7 +268,7 @@ pub async fn has_revealed_pk( /// Submit transaction to reveal the given public key pub async fn build_reveal_pk<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::Tx, public_key: &common::PublicKey, ) -> Result<(Tx, SigningTxData, Option)> { @@ -507,7 +507,7 @@ pub async fn save_initialized_accounts( /// Submit validator comission rate change pub async fn build_validator_commission_change<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::CommissionRateChange { tx: tx_args, validator, @@ -524,12 +524,12 @@ pub async fn build_validator_commission_change<'a>( ) .await?; - let epoch = rpc::query_epoch(context.client).await?; + let epoch = rpc::query_epoch(context.client()).await?; - let params: PosParams = rpc::get_pos_params(context.client).await?; + let params: PosParams = rpc::get_pos_params(context.client()).await?; let validator = validator.clone(); - if rpc::is_validator(context.client, &validator).await? { + if rpc::is_validator(context.client(), &validator).await? 
{ if *rate < Dec::zero() || *rate > Dec::one() { edisplay_line!( StdIo, @@ -542,7 +542,7 @@ pub async fn build_validator_commission_change<'a>( let pipeline_epoch_minus_one = epoch + params.pipeline_len - 1; match rpc::query_commission_rate( - context.client, + context.client(), &validator, Some(pipeline_epoch_minus_one), ) @@ -604,7 +604,7 @@ pub async fn build_validator_commission_change<'a>( /// Craft transaction to update a steward commission pub async fn build_update_steward_commission<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::UpdateStewardCommission { tx: tx_args, steward, @@ -621,7 +621,7 @@ pub async fn build_update_steward_commission<'a>( ) .await?; - if !rpc::is_steward(context.client, steward).await && !tx_args.force { + if !rpc::is_steward(context.client(), steward).await && !tx_args.force { edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -659,7 +659,7 @@ pub async fn build_update_steward_commission<'a>( /// Craft transaction to resign as a steward pub async fn build_resign_steward<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::ResignSteward { tx: tx_args, steward, @@ -675,7 +675,7 @@ pub async fn build_resign_steward<'a>( ) .await?; - if !rpc::is_steward(context.client, steward).await && !tx_args.force { + if !rpc::is_steward(context.client(), steward).await && !tx_args.force { edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -695,7 +695,7 @@ pub async fn build_resign_steward<'a>( /// Submit transaction to unjail a jailed validator pub async fn build_unjail_validator<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::TxUnjailValidator { tx: tx_args, validator, @@ -711,7 +711,7 @@ pub async fn build_unjail_validator<'a>( ) .await?; - if !rpc::is_validator(context.client, 
validator).await? { + if !rpc::is_validator(context.client(), validator).await? { edisplay_line!( StdIo, "The given address {} is not a validator.", @@ -724,12 +724,12 @@ pub async fn build_unjail_validator<'a>( } } - let params: PosParams = rpc::get_pos_params(context.client).await?; - let current_epoch = rpc::query_epoch(context.client).await?; + let params: PosParams = rpc::get_pos_params(context.client()).await?; + let current_epoch = rpc::query_epoch(context.client()).await?; let pipeline_epoch = current_epoch + params.pipeline_len; let validator_state_at_pipeline = rpc::get_validator_state( - context.client, + context.client(), validator, Some(pipeline_epoch), ) @@ -756,7 +756,7 @@ pub async fn build_unjail_validator<'a>( let last_slash_epoch_key = crate::ledger::pos::validator_last_slash_key(validator); let last_slash_epoch = rpc::query_storage_value::<_, Epoch>( - context.client, + context.client(), &last_slash_epoch_key, ) .await; @@ -807,7 +807,7 @@ pub async fn build_unjail_validator<'a>( /// Submit transaction to withdraw an unbond pub async fn build_withdraw<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::Withdraw { tx: tx_args, validator, @@ -825,12 +825,12 @@ pub async fn build_withdraw<'a>( ) .await?; - let epoch = rpc::query_epoch(context.client).await?; + let epoch = rpc::query_epoch(context.client()).await?; let validator = known_validator_or_err( validator.clone(), tx_args.force, - context.client, + context.client(), ) .await?; @@ -839,7 +839,7 @@ pub async fn build_withdraw<'a>( // Check the source's current unbond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); let tokens = rpc::query_withdrawable_tokens( - context.client, + context.client(), &bond_source, &validator, Some(epoch), @@ -853,8 +853,12 @@ pub async fn build_withdraw<'a>( epoch {}.", epoch ); - rpc::query_and_print_unbonds(context.client, &bond_source, &validator) - .await?; + rpc::query_and_print_unbonds( + context.client(), + 
&bond_source, + &validator, + ) + .await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } @@ -884,7 +888,7 @@ pub async fn build_withdraw<'a>( /// Submit a transaction to unbond pub async fn build_unbond<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::Unbond { tx: tx_args, validator, @@ -916,12 +920,12 @@ pub async fn build_unbond<'a>( known_validator_or_err( validator.clone(), tx_args.force, - context.client, + context.client(), ) .await?; let bond_amount = - rpc::query_bond(context.client, &bond_source, validator, None).await?; + rpc::query_bond(context.client(), &bond_source, validator, None).await?; display_line!( StdIo, "Bond amount available for unbonding: {} NAM", @@ -950,7 +954,7 @@ pub async fn build_unbond<'a>( // Query the unbonds before submitting the tx let unbonds = rpc::query_unbond_with_slashing( - context.client, + context.client(), &bond_source, validator, ) @@ -1051,7 +1055,7 @@ pub async fn query_unbonds( /// Submit a transaction to bond pub async fn build_bond<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::Bond { tx: tx_args, validator, @@ -1074,14 +1078,14 @@ pub async fn build_bond<'a>( let validator = known_validator_or_err( validator.clone(), tx_args.force, - context.client, + context.client(), ) .await?; // Check that the source address exists on chain let source = match source.clone() { Some(source) => { - source_exists_or_err(source, tx_args.force, context.client) + source_exists_or_err(source, tx_args.force, context.client()) .await .map(Some) } @@ -1099,7 +1103,7 @@ pub async fn build_bond<'a>( *amount, balance_key, tx_args.force, - context.client, + context.client(), ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1129,7 +1133,7 @@ pub async fn build_bond<'a>( /// Build a default proposal governance pub async fn build_default_proposal<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::InitProposal { tx, 
proposal_data: _, @@ -1182,7 +1186,7 @@ pub async fn build_default_proposal<'a>( /// Build a proposal vote pub async fn build_vote_proposal<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::VoteProposal { tx, proposal_id, @@ -1210,7 +1214,7 @@ pub async fn build_vote_proposal<'a>( Error::Other("Proposal id must be defined.".to_string()) })?; let proposal = if let Some(proposal) = - rpc::query_proposal_by_id(context.client, proposal_id).await? + rpc::query_proposal_by_id(context.client(), proposal_id).await? { proposal } else { @@ -1225,7 +1229,7 @@ pub async fn build_vote_proposal<'a>( )) })?; - let is_validator = rpc::is_validator(context.client, voter).await?; + let is_validator = rpc::is_validator(context.client(), voter).await?; if !proposal.can_be_voted(epoch, is_validator) { if tx.force { @@ -1238,7 +1242,7 @@ pub async fn build_vote_proposal<'a>( } let delegations = rpc::get_delegators_delegation_at( - context.client, + context.client(), voter, proposal.voting_start_epoch, ) @@ -1269,7 +1273,7 @@ pub async fn build_vote_proposal<'a>( /// Build a pgf funding proposal governance pub async fn build_pgf_funding_proposal<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1314,7 +1318,7 @@ pub async fn build_pgf_funding_proposal<'a>( /// Build a pgf funding proposal governance pub async fn build_pgf_stewards_proposal<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1360,7 +1364,7 @@ pub async fn build_pgf_stewards_proposal<'a>( /// Submit an IBC transfer pub async fn build_ibc_transfer<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args: &args::TxIbcTransfer, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(args.source.clone()); @@ -1375,14 +1379,14 @@ pub async fn build_ibc_transfer<'a>( let source = source_exists_or_err( args.source.clone(), args.tx.force, - context.client, 
+ context.client(), ) .await?; // We cannot check the receiver // validate the amount given let validated_amount = validate_amount( - context.client, + context.client(), args.amount, &args.token, args.tx.force, @@ -1405,7 +1409,7 @@ pub async fn build_ibc_transfer<'a>( validated_amount.amount, balance_key, args.tx.force, - context.client, + context.client(), ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1415,7 +1419,7 @@ pub async fn build_ibc_transfer<'a>( }); let tx_code_hash = query_wasm_code_hash( - context.client, + context.client(), args.tx_code_path.to_str().unwrap(), ) .await @@ -1425,7 +1429,7 @@ pub async fn build_ibc_transfer<'a>( Address::Internal(InternalAddress::IbcToken(hash)) => { let ibc_denom_key = ibc_denom_key(hash); rpc::query_storage_value::<_, String>( - context.client, + context.client(), &ibc_denom_key, ) .await @@ -1505,7 +1509,7 @@ pub async fn build_ibc_transfer<'a>( /// Abstraction for helping build transactions #[allow(clippy::too_many_arguments)] pub async fn build<'a, F, D>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, tx_args: &crate::sdk::args::Tx, path: PathBuf, data: D, @@ -1531,7 +1535,7 @@ where #[allow(clippy::too_many_arguments)] async fn build_pow_flag<'a, F, D>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, tx_args: &crate::sdk::args::Tx, path: PathBuf, mut data: D, @@ -1548,7 +1552,7 @@ where let mut tx_builder = Tx::new(chain_id, tx_args.expiration); let tx_code_hash = - query_wasm_code_hash(context.client, path.to_string_lossy()) + query_wasm_code_hash(context.client(), path.to_string_lossy()) .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; @@ -1571,13 +1575,13 @@ where /// Returns true only if a new decoding has been added to the given set. 
async fn add_asset_type<'a>( asset_types: &mut HashSet<(Address, MaspDenom, Epoch)>, - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, asset_type: AssetType, ) -> bool { - let context = &mut **context; if let Some(asset_type) = context - .shielded - .decode_asset_type(context.client, asset_type) + .shielded_mut() + .await + .decode_asset_type(context.client(), asset_type) .await { asset_types.insert(asset_type) @@ -1590,7 +1594,7 @@ async fn add_asset_type<'a>( /// function provides the data necessary for offline wallets to present asset /// type information. async fn used_asset_types<'a, P, R, K, N>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, builder: &Builder, ) -> std::result::Result, RpcError> { let mut asset_types = HashSet::new(); @@ -1624,7 +1628,7 @@ async fn used_asset_types<'a, P, R, K, N>( /// Submit an ordinary transfer pub async fn build_transfer<'a, N: Namada<'a>>( - context: &mut N, + context: &N, args: &mut args::TxTransfer, ) -> Result<(Tx, SigningTxData, Option)> { let default_signer = Some(args.source.effective_address()); @@ -1641,15 +1645,17 @@ pub async fn build_transfer<'a, N: Namada<'a>>( let token = args.token.clone(); // Check that the source address exists on chain - source_exists_or_err(source.clone(), args.tx.force, context.client).await?; + source_exists_or_err(source.clone(), args.tx.force, context.client()) + .await?; // Check that the target address exists on chain - target_exists_or_err(target.clone(), args.tx.force, context.client).await?; + target_exists_or_err(target.clone(), args.tx.force, context.client()) + .await?; // Check source balance let balance_key = token::balance_key(&token, &source); // validate the amount given let validated_amount = - validate_amount(context.client, args.amount, &token, args.tx.force) + validate_amount(context.client(), args.amount, &token, args.tx.force) .await?; args.amount = InputAmount::Validated(validated_amount); @@ -1659,7 +1665,7 @@ pub async fn 
build_transfer<'a, N: Namada<'a>>( validated_amount.amount, balance_key, args.tx.force, - context.client, + context.client(), ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1792,7 +1798,7 @@ pub async fn build_transfer<'a, N: Namada<'a>>( /// Submit a transaction to initialize an account pub async fn build_init_account<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::TxInitAccount { tx: tx_args, vp_code_path, @@ -1805,7 +1811,7 @@ pub async fn build_init_account<'a>( signing::aux_signing_data(context, tx_args, None, None).await?; let vp_code_hash = - query_wasm_code_hash_buf(context.client, vp_code_path).await?; + query_wasm_code_hash_buf(context.client(), vp_code_path).await?; let threshold = match threshold { Some(threshold) => *threshold, @@ -1845,7 +1851,7 @@ pub async fn build_init_account<'a>( /// Submit a transaction to update a VP pub async fn build_update_account<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::TxUpdateAccount { tx: tx_args, vp_code_path, @@ -1865,7 +1871,7 @@ pub async fn build_update_account<'a>( .await?; let addr = if let Some(account) = - rpc::get_account_info(context.client, addr).await? + rpc::get_account_info(context.client(), addr).await? { account.address } else if tx_args.force { @@ -1877,7 +1883,7 @@ pub async fn build_update_account<'a>( let vp_code_hash = match vp_code_path { Some(code_path) => { let vp_hash = - query_wasm_code_hash_buf(context.client, code_path).await?; + query_wasm_code_hash_buf(context.client(), code_path).await?; Some(vp_hash) } None => None, @@ -1916,7 +1922,7 @@ pub async fn build_update_account<'a>( /// Submit a custom transaction pub async fn build_custom<'a>( - context: &mut impl Namada<'a>, + context: &impl Namada<'a>, args::TxCustom { tx: tx_args, code_path, @@ -1940,7 +1946,7 @@ pub async fn build_custom<'a>( })? 
} else { let tx_code_hash = query_wasm_code_hash_buf( - context.client, + context.client(), code_path .as_ref() .ok_or(Error::Other("No code path supplied".to_string()))?, From 64e817b13ab2476004b43f62bc997a03dbe57159 Mon Sep 17 00:00:00 2001 From: brentstone Date: Tue, 3 Oct 2023 16:10:14 -0600 Subject: [PATCH 057/161] move protocol keys into epoched PoS data fix tests --- apps/src/lib/config/genesis.rs | 19 ++++------ apps/src/lib/node/ledger/shell/init_chain.rs | 4 -- .../shell/vote_extensions/bridge_pool_vext.rs | 16 +------- ethereum_bridge/src/test_utils.rs | 4 ++ proof_of_stake/src/epoched.rs | 1 + proof_of_stake/src/lib.rs | 27 +++++++++++++- proof_of_stake/src/pos_queries.rs | 37 +++++++++---------- proof_of_stake/src/tests.rs | 17 +++++++++ proof_of_stake/src/tests/state_machine.rs | 9 +++++ proof_of_stake/src/types.rs | 9 +++++ shared/src/ledger/native_vp/ibc/mod.rs | 4 ++ tx_prelude/src/proof_of_stake.rs | 3 +- wasm/wasm_source/src/tx_bond.rs | 2 + .../src/tx_change_validator_commission.rs | 3 ++ wasm/wasm_source/src/tx_unbond.rs | 3 ++ wasm/wasm_source/src/tx_withdraw.rs | 3 ++ wasm/wasm_source/src/vp_implicit.rs | 4 ++ wasm/wasm_source/src/vp_user.rs | 4 ++ wasm/wasm_source/src/vp_validator.rs | 4 ++ 19 files changed, 120 insertions(+), 53 deletions(-) diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index 502082af48..921e42a844 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -338,6 +338,12 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + protocol_key: config + .protocol_public_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), eth_cold_key: config .eth_cold_key .as_ref() @@ -372,12 +378,6 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), - protocol_key: config - .protocol_public_key - .as_ref() - .unwrap() - .to_public_key() - .unwrap(), dkg_public_key: config .dkg_public_key .as_ref() @@ -774,9 +774,6 @@ pub struct Validator { /// this key on 
a transaction signature. /// Note that this is distinct from consensus key used in the PoS system. pub account_key: common::PublicKey, - /// Public key associated with validator account used for signing protocol - /// transactions - pub protocol_key: common::PublicKey, /// The public DKG session key used during the DKG protocol pub dkg_public_key: DkgPublicKey, /// These tokens are not staked and hence do not contribute to the @@ -938,6 +935,7 @@ pub fn genesis(num_validators: u64) -> Genesis { address, tokens: token::Amount::native_whole(200_000), consensus_key: consensus_keypair.ref_to(), + protocol_key: protocol_keypair.ref_to(), commission_rate: Dec::new(5, 2).expect("This can't fail"), max_commission_rate_change: Dec::new(1, 2) .expect("This can't fail"), @@ -945,7 +943,6 @@ pub fn genesis(num_validators: u64) -> Genesis { eth_hot_key: eth_bridge_keypair.ref_to(), }, account_key: account_keypair.ref_to(), - protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::native_whole(100_000), // TODO replace with https://github.com/anoma/namada/issues/25) @@ -971,6 +968,7 @@ pub fn genesis(num_validators: u64) -> Genesis { address, tokens: token::Amount::native_whole(200_000), consensus_key: consensus_keypair.ref_to(), + protocol_key: protocol_keypair.ref_to(), commission_rate: Dec::new(5, 2).expect("This can't fail"), max_commission_rate_change: Dec::new(1, 2) .expect("This can't fail"), @@ -978,7 +976,6 @@ pub fn genesis(num_validators: u64) -> Genesis { eth_hot_key: eth_bridge_keypair.ref_to(), }, account_key: account_keypair.ref_to(), - protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::native_whole(100_000), // TODO replace with https://github.com/anoma/namada/issues/25) diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index 4d7522aaba..a8d47df174 100644 --- 
a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -394,10 +394,6 @@ where ) .unwrap(); - self.wl_storage - .write(&protocol_pk_key(addr), &validator.protocol_key) - .expect("Unable to set genesis user protocol public key"); - self.wl_storage .write( &dkg_session_keys::dkg_pk_key(addr), diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 880409bef8..05f0263a10 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -276,7 +276,6 @@ mod test_bp_vote_extensions { #[cfg(not(feature = "abcipp"))] use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; - use namada::ledger::storage_api::StorageWrite; use namada::proof_of_stake::types::{ Position as ValidatorPosition, WeightedValidator, }; @@ -321,24 +320,12 @@ mod test_bp_vote_extensions { ) .expect("Test failed"); - // register Bertha's protocol key - let pk_key = protocol_pk_key(&bertha_address()); - shell - .wl_storage - .write_bytes( - &pk_key, - bertha_keypair() - .ref_to() - .try_to_vec() - .expect("Test failed."), - ) - .expect("Test failed."); - // change pipeline length to 1 let mut params = shell.wl_storage.pos_queries().get_pos_params(); params.owned.pipeline_len = 1; let consensus_key = gen_keypair(); + let protocol_key = bertha_keypair(); let hot_key = gen_secp256k1_keypair(); let cold_key = gen_secp256k1_keypair(); @@ -347,6 +334,7 @@ mod test_bp_vote_extensions { params: ¶ms, address: &bertha_address(), consensus_key: &consensus_key.ref_to(), + protocol_key: &protocol_key.ref_to(), eth_hot_key: &hot_key.ref_to(), eth_cold_key: &cold_key.ref_to(), current_epoch: 0.into(), diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index 8091566f10..2764d5c4d7 100644 --- a/ethereum_bridge/src/test_utils.rs 
+++ b/ethereum_bridge/src/test_utils.rs @@ -198,6 +198,7 @@ pub fn init_storage_with_validators( .map(|(address, tokens)| { let keys = TestValidatorKeys::generate(); let consensus_key = keys.consensus.ref_to(); + let protocol_key = keys.protocol.ref_to(); let eth_cold_key = keys.eth_gov.ref_to(); let eth_hot_key = keys.eth_bridge.ref_to(); all_keys.insert(address.clone(), keys); @@ -205,6 +206,7 @@ pub fn init_storage_with_validators( address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate: Dec::new(5, 2).unwrap(), @@ -270,6 +272,7 @@ pub fn append_validators_to_storage( let keys = TestValidatorKeys::generate(); let consensus_key = &keys.consensus.ref_to(); + let protocol_key = &&keys.protocol.ref_to(); let eth_cold_key = &keys.eth_gov.ref_to(); let eth_hot_key = &keys.eth_bridge.ref_to(); @@ -278,6 +281,7 @@ pub fn append_validators_to_storage( params: ¶ms, address: &validator, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, current_epoch, diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 0f66a36d84..e8e8498645 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -1372,6 +1372,7 @@ mod test { address: established_address_1(), tokens: token::Amount::native_whole(1_000), consensus_key: key::testing::keypair_1().to_public(), + protocol_key: key::testing::keypair_2().to_public(), eth_hot_key: key::testing::keypair_3().to_public(), eth_cold_key: key::testing::keypair_3().to_public(), commission_rate: Dec::new(1, 1).expect("Dec creation failed"), diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 223abdc1fe..bcb03319f9 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -41,7 +41,7 @@ use namada_core::ledger::storage_api::{ use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::key::{ - common, tm_consensus_key_raw_hash, PublicKeyTmRawHash, + common, 
protocol_pk_key, tm_consensus_key_raw_hash, PublicKeyTmRawHash, }; pub use namada_core::types::storage::{Epoch, Key, KeySeg}; use namada_core::types::token; @@ -58,7 +58,7 @@ use storage::{ BondsAndUnbondsDetail, BondsAndUnbondsDetails, EpochedSlashes, ReverseOrdTokenAmount, RewardsAccumulator, SlashedAmount, TotalConsensusStakes, UnbondDetails, ValidatorAddresses, - ValidatorUnbondRecords, + ValidatorProtocolKeys, ValidatorUnbondRecords, }; use thiserror::Error; use types::{ @@ -261,6 +261,14 @@ pub fn validator_consensus_key_handle( ValidatorConsensusKeys::open(key) } +/// Get the storage handle to a PoS validator's protocol key key. +pub fn validator_protocol_key_handle( + validator: &Address, +) -> ValidatorProtocolKeys { + let key = protocol_pk_key(validator); + ValidatorProtocolKeys::open(key) +} + /// Get the storage handle to a PoS validator's eth hot key. pub fn validator_eth_hot_key_handle( validator: &Address, @@ -414,6 +422,7 @@ where address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, @@ -453,6 +462,11 @@ where consensus_key, current_epoch, )?; + validator_protocol_key_handle(&address).init_at_genesis( + storage, + protocol_key, + current_epoch, + )?; validator_eth_hot_key_handle(&address).init_at_genesis( storage, eth_hot_key, @@ -2159,6 +2173,8 @@ pub struct BecomeValidator<'a, S> { pub address: &'a Address, /// The validator's consensus key, used by Tendermint. pub consensus_key: &'a common::PublicKey, + /// The validator's protocol key. + pub protocol_key: &'a common::PublicKey, /// The validator's Ethereum bridge cold key. pub eth_cold_key: &'a common::PublicKey, /// The validator's Ethereum bridge hot key. 
@@ -2183,6 +2199,7 @@ where params, address, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, current_epoch, @@ -2213,6 +2230,12 @@ where current_epoch, params.pipeline_len, )?; + validator_protocol_key_handle(address).set( + storage, + protocol_key.clone(), + current_epoch, + params.pipeline_len, + )?; validator_eth_hot_key_handle(address).set( storage, eth_hot_key.clone(), diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs index 190548570b..b694897aa8 100644 --- a/proof_of_stake/src/pos_queries.rs +++ b/proof_of_stake/src/pos_queries.rs @@ -1,7 +1,6 @@ //! Storage API for querying data about Proof-of-stake related //! data. This includes validator and epoch related data. -use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key; use namada_core::ledger::parameters::EpochDuration; use namada_core::ledger::storage::WlStorage; @@ -172,17 +171,17 @@ where pk: &key::common::PublicKey, epoch: Option, ) -> Result { - let pk_bytes = pk - .try_to_vec() - .expect("Serializing public key should not fail"); + let params = crate::read_pos_params(self.wl_storage) + .expect("Failed to fetch Pos params"); let epoch = epoch .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); self.get_consensus_validators(Some(epoch)) .iter() .find(|validator| { - let pk_key = key::protocol_pk_key(&validator.address); - match self.wl_storage.storage.read(&pk_key) { - Ok((Some(bytes), _)) => bytes == pk_bytes, + let protocol_keys = + crate::validator_protocol_key_handle(&validator.address); + match protocol_keys.get(self.wl_storage, epoch, ¶ms) { + Ok(Some(key)) => key == *pk, _ => false, } }) @@ -195,26 +194,24 @@ where address: &Address, epoch: Option, ) -> Result<(token::Amount, key::common::PublicKey)> { + let params = crate::read_pos_params(self.wl_storage) + .expect("Failed to fetch Pos params"); let epoch = epoch .unwrap_or_else(|| 
self.wl_storage.storage.get_current_epoch().0); self.get_consensus_validators(Some(epoch)) .iter() .find(|validator| address == &validator.address) .map(|validator| { - let protocol_pk_key = key::protocol_pk_key(&validator.address); - // TODO: rewrite this, to use `StorageRead::read` - let bytes = self - .wl_storage - .storage - .read(&protocol_pk_key) - .expect("Validator should have public protocol key") - .0 - .expect("Validator should have public protocol key"); - let protocol_pk: key::common::PublicKey = - BorshDeserialize::deserialize(&mut bytes.as_ref()).expect( - "Protocol public key in storage should be \ - deserializable", + let protocol_keys = + crate::validator_protocol_key_handle(&validator.address); + let protocol_pk = protocol_keys + .get(self.wl_storage, epoch, ¶ms) + .unwrap() + .expect( + "Protocol public key should be set in storage after \ + genesis.", ); + (validator.bonded_stake, protocol_pk) }) .ok_or_else(|| Error::NotValidatorAddress(address.clone(), epoch)) diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index 045fc383da..497b53151c 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -853,6 +853,8 @@ fn test_become_validator_aux( // Initialize the validator account let consensus_key = new_validator_consensus_key.to_public(); + let protocol_sk = common_sk_from_simple_seed(0); + let protocol_key = protocol_sk.to_public(); let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::().ref_to(), ); @@ -864,6 +866,7 @@ fn test_become_validator_aux( params: ¶ms, address: &new_validator, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, @@ -1176,6 +1179,9 @@ fn test_validator_sets() { let start_epoch = Epoch::default(); let epoch = start_epoch; + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + let params = test_init_genesis( &mut s, params, @@ 
-1184,6 +1190,7 @@ fn test_validator_sets() { address: val1.clone(), tokens: stake1, consensus_key: pk1.clone(), + protocol_key: protocol_sk_1.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1200,6 +1207,7 @@ fn test_validator_sets() { address: val2.clone(), tokens: stake2, consensus_key: pk2.clone(), + protocol_key: protocol_sk_2.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1847,6 +1855,9 @@ fn test_validator_sets_swap() { println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + let params = test_init_genesis( &mut s, params, @@ -1855,6 +1866,7 @@ fn test_validator_sets_swap() { address: val1, tokens: stake1, consensus_key: pk1, + protocol_key: protocol_sk_1.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1871,6 +1883,7 @@ fn test_validator_sets_swap() { address: val2.clone(), tokens: stake2, consensus_key: pk2, + protocol_key: protocol_sk_2.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -2098,6 +2111,9 @@ fn arb_genesis_validators( let consensus_sk = common_sk_from_simple_seed(seed); let consensus_key = consensus_sk.to_public(); + let protocol_sk = common_sk_from_simple_seed(seed); + let protocol_key = protocol_sk.to_public(); + let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::( ) @@ -2116,6 +2132,7 @@ fn arb_genesis_validators( address, tokens, consensus_key, + protocol_key, eth_hot_key, eth_cold_key, commission_rate, diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index 05b9b26345..1313c467ff 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ 
b/proof_of_stake/src/tests/state_machine.rs @@ -106,6 +106,7 @@ enum Transition { InitValidator { address: Address, consensus_key: PublicKey, + protocol_key: PublicKey, eth_cold_key: PublicKey, eth_hot_key: PublicKey, commission_rate: Dec, @@ -189,6 +190,7 @@ impl StateMachineTest for ConcretePosState { Transition::InitValidator { address, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, @@ -202,6 +204,7 @@ impl StateMachineTest for ConcretePosState { params: ¶ms, address: &address, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, @@ -1226,6 +1229,7 @@ impl ReferenceStateMachine for AbstractPosState { address, tokens, consensus_key: _, + protocol_key: _, eth_cold_key: _, eth_hot_key: _, commission_rate: _, @@ -1349,6 +1353,7 @@ impl ReferenceStateMachine for AbstractPosState { 1 => ( address::testing::arb_established_address(), key::testing::arb_common_keypair(), + key::testing::arb_common_keypair(), key::testing::arb_common_secp256k1_keypair(), key::testing::arb_common_secp256k1_keypair(), arb_rate(), @@ -1358,6 +1363,7 @@ impl ReferenceStateMachine for AbstractPosState { |( addr, consensus_key, + protocol_key, eth_hot_key, eth_cold_key, commission_rate, @@ -1366,6 +1372,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address: Address::Established(addr), consensus_key: consensus_key.to_public(), + protocol_key: protocol_key.to_public(), eth_hot_key: eth_hot_key.to_public(), eth_cold_key: eth_cold_key.to_public(), commission_rate, @@ -1443,6 +1450,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address, consensus_key: _, + protocol_key: _, eth_cold_key: _, eth_hot_key: _, commission_rate: _, @@ -1839,6 +1847,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address, consensus_key: _, + protocol_key: _, eth_cold_key: _, eth_hot_key: _, commission_rate: _, diff 
--git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 3cc24ffbbb..6a38785917 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -40,6 +40,13 @@ pub type ValidatorConsensusKeys = crate::epoched::Epoched< crate::epoched::OffsetDefaultNumPastEpochs, >; +/// Epoched validator's protocol key. +pub type ValidatorProtocolKeys = crate::epoched::Epoched< + common::PublicKey, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetMaxProposalPeriodPlus, +>; + /// Epoched validator's eth hot key. pub type ValidatorEthHotKeys = crate::epoched::Epoched< common::PublicKey, @@ -205,6 +212,8 @@ pub struct GenesisValidator { pub tokens: token::Amount, /// A public key used for signing validator's consensus actions pub consensus_key: common::PublicKey, + /// A public key used for signing protocol transactions + pub protocol_key: common::PublicKey, /// An Eth bridge governance public key pub eth_cold_key: common::PublicKey, /// An Eth bridge hot signing public key used for validator set updates and diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 818aa4df79..0f6c9ef6f7 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -257,6 +257,9 @@ pub fn get_dummy_genesis_validator() let consensus_sk = common_sk_from_simple_seed(0); let consensus_key = consensus_sk.to_public(); + let protocol_sk = common_sk_from_simple_seed(1); + let protocol_key = protocol_sk.to_public(); + let commission_rate = Dec::new(1, 1).expect("expected 0.1 to be a valid decimal"); let max_commission_rate_change = @@ -278,6 +281,7 @@ pub fn get_dummy_genesis_validator() address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index cc8bcb7b63..341c819347 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -96,8 +96,6 @@ impl 
Ctx { &account_keys, threshold, )?; - let protocol_pk_key = key::protocol_pk_key(&validator_address); - self.write(&protocol_pk_key, &protocol_key)?; let dkg_pk_key = key::dkg_session_keys::dkg_pk_key(&validator_address); self.write(&dkg_pk_key, &dkg_key)?; let eth_cold_key = key::common::PublicKey::Secp256k1(eth_cold_key); @@ -109,6 +107,7 @@ impl Ctx { params: ¶ms, address: &validator_address, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 509a50f822..e51164b3a3 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -76,6 +76,7 @@ mod tests { let is_delegation = matches!(&bond.source, Some(source) if *source != bond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); let max_commission_rate_change = Dec::new(1, 2).expect("Cannot fail"); let eth_cold_key = key::testing::keypair_3().ref_to(); @@ -85,6 +86,7 @@ mod tests { address: bond.validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index b8db50ecb0..82bdd5717f 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -66,6 +66,8 @@ mod tests { pos_params: OwnedPosParams, ) -> TxResult { let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::().ref_to(), ); @@ -76,6 +78,7 @@ mod tests { address: commission_change.validator.clone(), tokens: token::Amount::from_uint(1_000_000, 0).unwrap(), 
consensus_key, + protocol_key, commission_rate: initial_rate, max_commission_rate_change: max_change, eth_hot_key, diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 5e1eaeb2a3..cd085e3c2e 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -75,6 +75,8 @@ mod tests { &unbond.source, Some(source) if *source != unbond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); @@ -90,6 +92,7 @@ mod tests { initial_stake }, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index e288282c42..54280fa7e2 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -78,6 +78,8 @@ mod tests { let is_delegation = matches!( &withdraw.source, Some(source) if *source != withdraw.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); @@ -94,6 +96,7 @@ mod tests { initial_stake }, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 215dccf421..c51650b752 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -391,6 +391,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = 
key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -400,6 +401,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -470,6 +472,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -477,6 +480,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: key::common::PublicKey::Secp256k1( diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index a334576b53..5283b5e8db 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -418,6 +418,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -427,6 +428,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -495,6 +497,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = 
key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -502,6 +505,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: key::common::PublicKey::Secp256k1( diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index 77a8c76d66..1c53daa4f1 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -425,6 +425,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -434,6 +435,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -508,6 +510,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -515,6 +518,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: key::common::PublicKey::Secp256k1( From b38e6f45ab98b58f5b3281425e31d85b91bdc168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 29 Sep 2023 12:58:43 +0100 Subject: [PATCH 058/161] deps: switch to use libseck256k1 to k256 --- Cargo.lock | 77 
++----- Cargo.toml | 2 +- apps/src/lib/node/ledger/shell/mod.rs | 6 +- core/Cargo.toml | 8 +- core/src/types/key/mod.rs | 16 +- core/src/types/key/secp256k1.rs | 307 +++++++++++--------------- ethereum_bridge/Cargo.toml | 4 +- shared/Cargo.toml | 6 +- wasm/Cargo.lock | 75 ++----- wasm_for_tests/wasm_source/Cargo.lock | 75 ++----- 10 files changed, 199 insertions(+), 377 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..f54a480180 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1832,6 +1832,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1898,6 +1899,7 @@ dependencies = [ "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -3032,17 +3034,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ -3482,6 +3473,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -3587,57 +3579,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde 1.0.163", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = 
"git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libssh2-sys" version = "0.2.23" @@ -4028,7 +3976,7 @@ dependencies = [ "eyre", "futures", "itertools", - "libsecp256k1 0.7.0", + "k256", "loupe", "masp_primitives", "masp_proofs", @@ -4211,7 +4159,7 @@ dependencies = [ "impl-num-traits", "index-set", "itertools", - "libsecp256k1 0.7.0", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -6033,6 +5981,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -6211,6 +6160,16 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde 1.0.163", +] + [[package]] name = "sha1" version = "0.10.5" @@ -6804,7 +6763,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ "base58 0.1.0", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] diff --git a/Cargo.toml b/Cargo.toml index 9c731f0fdf..bd6727338b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,10 +85,10 @@ git2 = "0.13.25" ics23 = "0.9.0" index-set = {git = "https://github.com/heliaxdev/index-set", 
tag = "v0.7.1", features = ["serialize-borsh", "serialize-serde"]} itertools = "0.10.0" +k256 = { version = "0.13.0", default-features = false, features = ["ecdsa", "pkcs8", "precomputed-tables", "serde", "std"]} lazy_static = "1.4.0" libc = "0.2.97" libloading = "0.7.2" -libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} # branch = "murisi/namada-integration" masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e" } masp_proofs = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", default-features = false, features = ["local-prover"] } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..688a4d9699 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1612,11 +1612,11 @@ mod test_utils { ref sig, ref recovery_id, )) => { - let mut sig_bytes = sig.serialize(); - let recovery_id_bytes = recovery_id.serialize(); + let mut sig_bytes = sig.to_vec(); + let recovery_id_bytes = recovery_id.to_byte(); sig_bytes[0] = sig_bytes[0].wrapping_add(1); let bytes: [u8; 65] = - [sig_bytes.as_slice(), [recovery_id_bytes].as_slice()] + [sig_bytes.as_slice(), &[recovery_id_bytes]] .concat() .try_into() .unwrap(); diff --git a/core/Cargo.toml b/core/Cargo.toml index ebbb35f383..0dbd4b1822 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -25,11 +25,6 @@ ferveo-tpke = [ wasm-runtime = [ "rayon", ] -# secp256k1 key signing, disabled in WASM build by default as it bloats the -# build a lot -secp256k1-sign = [ - "libsecp256k1/hmac", -] abciplus = [ "ibc", @@ -78,7 +73,7 @@ ics23.workspace = true impl-num-traits = "0.1.2" index-set.workspace = true itertools.workspace = true -libsecp256k1.workspace = true +k256.workspace = true masp_primitives.workspace = true 
num256.workspace = true num-integer = "0.1.45" @@ -104,7 +99,6 @@ zeroize.workspace = true [dev-dependencies] assert_matches.workspace = true -libsecp256k1 = {workspace = true, features = ["hmac"]} pretty_assertions.workspace = true proptest.workspace = true rand.workspace = true diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs index 1287956b13..e6e52f108b 100644 --- a/core/src/types/key/mod.rs +++ b/core/src/types/key/mod.rs @@ -692,17 +692,15 @@ mod more_tests { fn zeroize_keypair_secp256k1() { use rand::thread_rng; - let mut sk = secp256k1::SigScheme::generate(&mut thread_rng()); - let sk_scalar = sk.0.to_scalar_ref(); - let len = sk_scalar.0.len(); - let ptr = sk_scalar.0.as_ref().as_ptr(); - - let original_data = sk_scalar.0; - + let sk = secp256k1::SigScheme::generate(&mut thread_rng()); + let (ptr, original_data) = { + let sk_scalar = sk.0.as_scalar_primitive().as_ref(); + (sk_scalar.as_ptr(), sk_scalar.to_owned()) + }; drop(sk); - assert_ne!(&original_data, unsafe { - core::slice::from_raw_parts(ptr, len) + assert_ne!(original_data.as_slice(), unsafe { + core::slice::from_raw_parts(ptr, secp256k1::SECRET_KEY_SIZE) }); } } diff --git a/core/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs index 6fde8af5cd..e65ae7fcbd 100644 --- a/core/src/types/key/secp256k1.rs +++ b/core/src/types/key/secp256k1.rs @@ -9,9 +9,9 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXLOWER; -use ethabi::ethereum_types::U256; use ethabi::Token; -use libsecp256k1::RecoveryId; +use k256::ecdsa::RecoveryId; +use k256::elliptic_curve::sec1::ToEncodedPoint; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::de::{Error, SeqAccess, Visitor}; @@ -22,7 +22,6 @@ use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, SignableBytes, VerifySigError, }; -use crate::hints; use crate::types::eth_abi::Encode; use 
crate::types::ethereum_events::EthAddress; use crate::types::key::StorageHasher; @@ -30,11 +29,16 @@ use crate::types::key::StorageHasher; /// The provided constant is for a traditional /// signature on this curve. For Ethereum, an extra byte is included /// that prevents malleability attacks. -pub const SIGNATURE_LENGTH: usize = libsecp256k1::util::SIGNATURE_SIZE + 1; +pub const SIGNATURE_SIZE: usize = 64 + 1; /// secp256k1 public key #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct PublicKey(pub libsecp256k1::PublicKey); +pub struct PublicKey(pub k256::PublicKey); + +/// Size of a compressed public key bytes +const COMPRESSED_PUBLIC_KEY_SIZE: usize = 33; +/// Size of a secret key bytes +pub(crate) const SECRET_KEY_SIZE: usize = 32; impl super::PublicKey for PublicKey { const TYPE: SchemeType = SigScheme::TYPE; @@ -59,26 +63,23 @@ impl super::PublicKey for PublicKey { impl BorshDeserialize for PublicKey { fn deserialize(buf: &mut &[u8]) -> std::io::Result { // deserialize the bytes first - let pk = libsecp256k1::PublicKey::parse_compressed( - buf.get(0..libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE) - .ok_or_else(|| std::io::Error::from(ErrorKind::UnexpectedEof))? 
- .try_into() - .unwrap(), - ) - .map_err(|e| { + let bytes = buf + .get(0..COMPRESSED_PUBLIC_KEY_SIZE) + .ok_or_else(|| std::io::Error::from(ErrorKind::UnexpectedEof))?; + let pk = k256::PublicKey::from_sec1_bytes(bytes).map_err(|e| { std::io::Error::new( ErrorKind::InvalidInput, format!("Error decoding secp256k1 public key: {}", e), ) })?; - *buf = &buf[libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE..]; + *buf = &buf[COMPRESSED_PUBLIC_KEY_SIZE..]; Ok(PublicKey(pk)) } } impl BorshSerialize for PublicKey { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - writer.write_all(&self.0.serialize_compressed())?; + writer.write_all(&self.0.to_sec1_bytes())?; Ok(()) } } @@ -92,7 +93,7 @@ impl BorshSchema for PublicKey { ) { // Encoded as `[u8; COMPRESSED_PUBLIC_KEY_SIZE]` let elements = "u8".into(); - let length = libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE as u32; + let length = COMPRESSED_PUBLIC_KEY_SIZE as u32; let definition = borsh::schema::Definition::Array { elements, length }; definitions.insert(Self::declaration(), definition); } @@ -105,29 +106,25 @@ impl BorshSchema for PublicKey { #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for PublicKey { fn hash(&self, state: &mut H) { - self.0.serialize_compressed().hash(state); + self.0.to_sec1_bytes().hash(state); } } impl PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0 - .serialize_compressed() - .partial_cmp(&other.0.serialize_compressed()) + self.0.to_sec1_bytes().partial_cmp(&other.0.to_sec1_bytes()) } } impl Ord for PublicKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0 - .serialize_compressed() - .cmp(&other.0.serialize_compressed()) + self.0.to_sec1_bytes().cmp(&other.0.to_sec1_bytes()) } } impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.0.serialize_compressed())) + write!(f, "{}", HEXLOWER.encode(&self.0.to_sec1_bytes())) } } @@ -143,8 +140,8 @@ 
impl FromStr for PublicKey { } } -impl From for PublicKey { - fn from(pk: libsecp256k1::PublicKey) -> Self { +impl From for PublicKey { + fn from(pk: k256::PublicKey) -> Self { Self(pk) } } @@ -154,9 +151,7 @@ impl From<&PublicKey> for EthAddress { use tiny_keccak::Hasher; let mut hasher = tiny_keccak::Keccak::v256(); - // We're removing the first byte with - // `libsecp256k1::util::TAG_PUBKEY_FULL` - let pk_bytes = &pk.0.serialize()[1..]; + let pk_bytes = &pk.0.to_encoded_point(false).to_bytes()[1..]; hasher.update(pk_bytes); let mut output = [0_u8; 32]; hasher.finalize(&mut output); @@ -168,7 +163,7 @@ impl From<&PublicKey> for EthAddress { /// Secp256k1 secret key #[derive(Debug, Clone)] -pub struct SecretKey(pub Box); +pub struct SecretKey(pub Box); impl super::SecretKey for SecretKey { type PublicKey = PublicKey; @@ -197,7 +192,7 @@ impl Serialize for SecretKey { where S: Serializer, { - let arr = self.0.serialize(); + let arr: [u8; SECRET_KEY_SIZE] = self.0.to_bytes().into(); serde::Serialize::serialize(&arr, serializer) } } @@ -207,10 +202,10 @@ impl<'de> Deserialize<'de> for SecretKey { where D: serde::Deserializer<'de>, { - let arr_res: [u8; libsecp256k1::util::SECRET_KEY_SIZE] = + let arr_res: [u8; SECRET_KEY_SIZE] = serde::Deserialize::deserialize(deserializer)?; - let key = libsecp256k1::SecretKey::parse_slice(&arr_res) - .map_err(D::Error::custom); + let key = + k256::SecretKey::from_slice(&arr_res).map_err(D::Error::custom); Ok(SecretKey(Box::new(key.unwrap()))) } } @@ -218,23 +213,21 @@ impl<'de> Deserialize<'de> for SecretKey { impl BorshDeserialize for SecretKey { fn deserialize(buf: &mut &[u8]) -> std::io::Result { // deserialize the bytes first - Ok(SecretKey(Box::new( - libsecp256k1::SecretKey::parse( - &(BorshDeserialize::deserialize(buf)?), + let bytes: [u8; SECRET_KEY_SIZE] = BorshDeserialize::deserialize(buf)?; + let sk = k256::SecretKey::from_slice(&bytes).map_err(|e| { + std::io::Error::new( + ErrorKind::InvalidInput, + format!("Error 
decoding secp256k1 secret key: {}", e), ) - .map_err(|e| { - std::io::Error::new( - ErrorKind::InvalidInput, - format!("Error decoding secp256k1 secret key: {}", e), - ) - })?, - ))) + })?; + Ok(SecretKey(Box::new(sk))) } } impl BorshSerialize for SecretKey { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - BorshSerialize::serialize(&self.0.serialize(), writer) + let bytes: [u8; SECRET_KEY_SIZE] = self.0.to_bytes().into(); + BorshSerialize::serialize(&bytes, writer) } } @@ -247,7 +240,7 @@ impl BorshSchema for SecretKey { ) { // Encoded as `[u8; SECRET_KEY_SIZE]` let elements = "u8".into(); - let length = libsecp256k1::util::SECRET_KEY_SIZE as u32; + let length = SECRET_KEY_SIZE as u32; let definition = borsh::schema::Definition::Array { elements, length }; definitions.insert(Self::declaration(), definition); } @@ -259,7 +252,7 @@ impl BorshSchema for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.0.serialize())) + write!(f, "{}", HEXLOWER.encode(&self.0.to_bytes())) } } @@ -277,13 +270,13 @@ impl FromStr for SecretKey { impl RefTo for SecretKey { fn ref_to(&self) -> PublicKey { - PublicKey(libsecp256k1::PublicKey::from_secret_key(&self.0)) + PublicKey(self.0.public_key()) } } /// Secp256k1 signature #[derive(Clone, Debug, Eq, PartialEq)] -pub struct Signature(pub libsecp256k1::Signature, pub RecoveryId); +pub struct Signature(pub k256::ecdsa::Signature, pub RecoveryId); impl super::Signature for Signature { const TYPE: SchemeType = SigScheme::TYPE; @@ -305,15 +298,14 @@ impl super::Signature for Signature { } } -// Would ideally like Serialize, Deserialize to be implemented in libsecp256k1, +// Would ideally like Serialize, Deserialize to be implemented in k256, // may try to do so and merge upstream in the future. 
- impl Serialize for Signature { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - let arr = self.0.serialize(); + let arr = self.0.to_bytes(); // TODO: implement the line below, currently cannot support [u8; 64] // serde::Serialize::serialize(&arr, serializer) @@ -321,7 +313,7 @@ impl Serialize for Signature { for elem in &arr[..] { seq.serialize_element(elem)?; } - seq.serialize_element(&self.1.serialize())?; + seq.serialize_element(&self.1.to_byte())?; seq.end() } } @@ -334,22 +326,25 @@ impl<'de> Deserialize<'de> for Signature { struct ByteArrayVisitor; impl<'de> Visitor<'de> for ByteArrayVisitor { - type Value = [u8; SIGNATURE_LENGTH]; + type Value = [u8; SIGNATURE_SIZE]; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str(&format!( "an array of length {}", - SIGNATURE_LENGTH, + SIGNATURE_SIZE, )) } - fn visit_seq(self, mut seq: A) -> Result<[u8; 65], A::Error> + fn visit_seq( + self, + mut seq: A, + ) -> Result<[u8; SIGNATURE_SIZE], A::Error> where A: SeqAccess<'de>, { - let mut arr = [0u8; SIGNATURE_LENGTH]; + let mut arr = [0u8; SIGNATURE_SIZE]; #[allow(clippy::needless_range_loop)] - for i in 0..SIGNATURE_LENGTH { + for i in 0..SIGNATURE_SIZE { arr[i] = seq .next_element()? 
.ok_or_else(|| Error::invalid_length(i, &self))?; @@ -358,14 +353,15 @@ impl<'de> Deserialize<'de> for Signature { } } - let arr_res = deserializer - .deserialize_tuple(SIGNATURE_LENGTH, ByteArrayVisitor)?; + let arr_res = + deserializer.deserialize_tuple(SIGNATURE_SIZE, ByteArrayVisitor)?; let sig_array: [u8; 64] = arr_res[..64].try_into().unwrap(); - let sig = libsecp256k1::Signature::parse_standard(&sig_array) + let sig = k256::ecdsa::Signature::from_slice(&sig_array) .map_err(D::Error::custom); Ok(Signature( sig.unwrap(), - RecoveryId::parse(arr_res[64]).map_err(Error::custom)?, + RecoveryId::from_byte(arr_res[64]) + .ok_or_else(|| Error::custom("Invalid recovery byte"))?, )) } } @@ -373,33 +369,30 @@ impl<'de> Deserialize<'de> for Signature { impl BorshDeserialize for Signature { fn deserialize(buf: &mut &[u8]) -> std::io::Result { // deserialize the bytes first - let (sig_bytes, recovery_id) = BorshDeserialize::deserialize(buf)?; + let (sig_bytes, recovery_id): ([u8; 64], u8) = + BorshDeserialize::deserialize(buf)?; Ok(Signature( - libsecp256k1::Signature::parse_standard(&sig_bytes).map_err( - |e| { - std::io::Error::new( - ErrorKind::InvalidInput, - format!("Error decoding secp256k1 signature: {}", e), - ) - }, - )?, - RecoveryId::parse(recovery_id).map_err(|e| { + k256::ecdsa::Signature::from_slice(&sig_bytes).map_err(|e| { std::io::Error::new( ErrorKind::InvalidInput, format!("Error decoding secp256k1 signature: {}", e), ) })?, + RecoveryId::from_byte(recovery_id).ok_or_else(|| { + std::io::Error::new( + ErrorKind::InvalidInput, + "Error decoding secp256k1 signature recovery byte", + ) + })?, )) } } impl BorshSerialize for Signature { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - BorshSerialize::serialize( - &(self.0.serialize(), self.1.serialize()), - writer, - ) + let sig_bytes: [u8; 64] = self.0.to_bytes().into(); + BorshSerialize::serialize(&(sig_bytes, self.1.to_byte()), writer) } } @@ -411,11 +404,8 @@ impl BorshSchema for Signature 
{ >, ) { // Encoded as `([u8; SIGNATURE_SIZE], u8)` - let signature = - <[u8; libsecp256k1::util::SIGNATURE_SIZE]>::declaration(); - <[u8; libsecp256k1::util::SIGNATURE_SIZE]>::add_definitions_recursively( - definitions, - ); + let signature = <[u8; SIGNATURE_SIZE]>::declaration(); + <[u8; SIGNATURE_SIZE]>::add_definitions_recursively(definitions); let recovery = "u8".into(); let definition = borsh::schema::Definition::Tuple { elements: vec![signature, recovery], @@ -429,52 +419,42 @@ impl BorshSchema for Signature { } impl Signature { - const S_MALLEABILITY_FIX: U256 = U256([ - 13822214165235122497, - 13451932020343611451, - 18446744073709551614, - 18446744073709551615, - ]); - // these constants are pulled from OpenZeppelin's ECDSA code - const S_MALLEABILITY_THRESHOLD: U256 = U256([ - 16134479119472337056, - 6725966010171805725, - 18446744073709551615, - 9223372036854775807, - ]); + /// OpenZeppelin consumes v values in the range [27, 28], + /// rather than [0, 1], the latter returned by `k256`. const V_FIX: u8 = 27; + /// Given a v signature parameter, flip its value + /// (i.e. negate the input). + /// + /// __INVARIANT__: The value of `v` must be in the range [0, 1]. + #[inline(always)] + fn flip_v(v: u8) -> u8 { + debug_assert!(v == 0 || v == 1); + v ^ 1 + } + /// Returns the `r`, `s` and `v` parameters of this [`Signature`], /// destroying the original value in the process. /// /// The returned signature is unique (i.e. non-malleable). This /// ensures OpenZeppelin considers the signature valid. pub fn into_eth_rsv(self) -> ([u8; 32], [u8; 32], u8) { - // assuming the value of v is either 0 or 1, - // the output is essentially the negated input - #[inline(always)] - fn flip_v(v: u8) -> u8 { - v ^ 1 - } - - let (v, s) = { - let s1: U256 = self.0.s.b32().into(); - let v = self.1.serialize(); - let (v, non_malleable_s) = - if hints::unlikely(s1 > Self::S_MALLEABILITY_THRESHOLD) { - // this code path seems quite rare. 
we often - // get non-malleable signatures, which is good - (flip_v(v) + Self::V_FIX, Self::S_MALLEABILITY_FIX - s1) - } else { - (v + Self::V_FIX, s1) - }; - let mut non_malleable_s: [u8; 32] = non_malleable_s.into(); - self.0.s.fill_b32(&mut non_malleable_s); - (v, self.0.s.b32()) + // A recovery id (dubbed v) is used by secp256k1 signatures + // to signal verifying code if a signature had been malleable + // or not (based on whether the s field of the signature was odd + // or not). In the `k256` dependency, the low-bit signifies the + // y-coordinate, associated with s, being odd. + let v = self.1.to_byte() & 1; + // Check if s needs to be normalized. In case it does, + // we must flip the value of v (e.g. 0 -> 1). + let (s, v) = if let Some(signature) = self.0.normalize_s() { + let normalized_s = signature.s(); + (normalized_s, Self::flip_v(v)) + } else { + (self.0.s(), v) }; - let r = self.0.r.b32(); - - (r, s, v) + let r = self.0.r(); + (r.to_bytes().into(), s.to_bytes().into(), v + Self::V_FIX) } } @@ -491,16 +471,14 @@ impl Encode<1> for Signature { #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for Signature { fn hash(&self, state: &mut H) { - self.0.serialize().hash(state); + self.0.to_bytes().hash(state); } } impl PartialOrd for Signature { fn partial_cmp(&self, other: &Self) -> Option { - match self.0.serialize().partial_cmp(&other.0.serialize()) { - Some(Ordering::Equal) => { - self.1.serialize().partial_cmp(&other.1.serialize()) - } + match self.0.to_bytes().partial_cmp(&other.0.to_bytes()) { + Some(Ordering::Equal) => self.1.partial_cmp(&other.1), res => res, } } @@ -516,21 +494,20 @@ impl TryFrom<&[u8; 65]> for Signature { type Error = ParseSignatureError; fn try_from(sig: &[u8; 65]) -> Result { - let sig_bytes = sig[..64].try_into().unwrap(); - let recovery_id = RecoveryId::parse(sig[64]).map_err(|err| { + let recovery_id = RecoveryId::from_byte(sig[64]).ok_or_else(|| { ParseSignatureError::InvalidEncoding(std::io::Error::new( 
ErrorKind::Other, - err, + "Invalid recovery byte", )) })?; - libsecp256k1::Signature::parse_standard(&sig_bytes) - .map(|sig| Self(sig, recovery_id)) - .map_err(|err| { + let sig = + k256::ecdsa::Signature::from_slice(&sig[..64]).map_err(|err| { ParseSignatureError::InvalidEncoding(std::io::Error::new( ErrorKind::Other, err, )) - }) + })?; + Ok(Self(sig, recovery_id)) } } @@ -563,12 +540,12 @@ impl super::SigScheme for SigScheme { where R: CryptoRng + RngCore, { - SecretKey(Box::new(libsecp256k1::SecretKey::random(csprng))) + SecretKey(Box::new(k256::SecretKey::random(csprng))) } fn from_bytes(sk: [u8; 32]) -> SecretKey { SecretKey(Box::new( - libsecp256k1::SecretKey::parse_slice(&sk) + k256::SecretKey::from_slice(&sk) .expect("Secret key parsing should not fail."), )) } @@ -580,20 +557,12 @@ impl super::SigScheme for SigScheme { where H: 'static + StorageHasher, { - #[cfg(not(any(test, feature = "secp256k1-sign")))] - { - // to avoid `unused-variables` warn - let _ = (keypair, data); - panic!("\"secp256k1-sign\" feature must be enabled"); - } - - #[cfg(any(test, feature = "secp256k1-sign"))] - { - let message = - libsecp256k1::Message::parse(&data.signable_hash::()); - let (sig, recovery_id) = libsecp256k1::sign(&message, &keypair.0); - Signature(sig, recovery_id) - } + let sig_key = k256::ecdsa::SigningKey::from(keypair.0.as_ref()); + let msg = data.signable_hash::(); + let (sig, recovery_id) = sig_key + .sign_prehash_recoverable(&msg) + .expect("Must be able to sign"); + Signature(sig, recovery_id) } fn verify_signature_with_hasher( @@ -604,21 +573,23 @@ impl super::SigScheme for SigScheme { where H: 'static + StorageHasher, { - let message = libsecp256k1::Message::parse(&data.signable_hash::()); - let is_valid = libsecp256k1::verify(&message, &sig.0, &pk.0); - if is_valid { - Ok(()) - } else { - Err(VerifySigError::SigVerifyError(format!( + use k256::ecdsa::signature::hazmat::PrehashVerifier; + + let vrf_key = k256::ecdsa::VerifyingKey::from(&pk.0); + let msg 
= data.signable_hash::(); + vrf_key.verify_prehash(&msg, &sig.0).map_err(|e| { + VerifySigError::SigVerifyError(format!( "Error verifying secp256k1 signature: {}", - libsecp256k1::Error::InvalidSignature - ))) - } + e + )) + }) } } #[cfg(test)] mod test { + use k256::elliptic_curve::sec1::ToEncodedPoint; + use super::*; /// test vector from https://bitcoin.stackexchange.com/a/89848 @@ -635,9 +606,9 @@ mod test { let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap(); let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let pk: PublicKey = sk.ref_to(); - // We're removing the first byte with - // `libsecp256k1::util::TAG_PUBKEY_FULL` - let pk_hex = HEXLOWER.encode(&pk.0.serialize()[1..]); + // We're removing the first byte with tag + let pk_hex = + HEXLOWER.encode(&pk.0.to_encoded_point(false).to_bytes()[1..]); assert_eq!(expected_pk_hex, pk_hex); let eth_addr: EthAddress = (&pk).into(); @@ -653,7 +624,7 @@ mod test { let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); - signature.1 = RecoveryId::parse(3).expect("Test failed"); + signature.1 = RecoveryId::from_byte(3).expect("Test failed"); let sig_json = serde_json::to_string(&signature).expect("Test failed"); let sig: Signature = serde_json::from_str(&sig_json).expect("Test failed"); @@ -668,28 +639,10 @@ mod test { let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); - signature.1 = RecoveryId::parse(3).expect("Test failed"); + signature.1 = RecoveryId::from_byte(3).expect("Test failed"); let sig_bytes = signature.try_to_vec().expect("Test failed"); let sig = Signature::try_from_slice(sig_bytes.as_slice()) .expect("Test failed"); assert_eq!(sig, signature); } - - /// Ensures we are using the right malleability consts. 
- #[test] - fn test_signature_malleability_consts() { - let s_threshold = U256::from_str_radix( - "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0", - 16, - ) - .unwrap(); - assert_eq!(Signature::S_MALLEABILITY_THRESHOLD, s_threshold); - - let malleable_const = U256::from_str_radix( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", - 16, - ) - .unwrap(); - assert_eq!(Signature::S_MALLEABILITY_FIX, malleable_const); - } } diff --git a/ethereum_bridge/Cargo.toml b/ethereum_bridge/Cargo.toml index 1354c36a8c..85b0959106 100644 --- a/ethereum_bridge/Cargo.toml +++ b/ethereum_bridge/Cargo.toml @@ -29,7 +29,7 @@ testing = [ ] [dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "ferveo-tpke", "ethers-derive"]} +namada_core = {path = "../core", default-features = false, features = ["ferveo-tpke", "ethers-derive"]} namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} borsh.workspace = true @@ -46,7 +46,7 @@ tracing = "0.1.30" [dev-dependencies] # Added "testing" feature. 
-namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "ferveo-tpke", "ethers-derive", "testing"]} +namada_core = {path = "../core", default-features = false, features = ["ferveo-tpke", "ethers-derive", "testing"]} assert_matches.workspace = true data-encoding.workspace = true ethabi.workspace = true diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 3259afd2c7..73b60fb038 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -93,7 +93,7 @@ namada-sdk = [ multicore = ["masp_proofs/multicore"] [dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign"]} +namada_core = {path = "../core", default-features = false} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} async-trait = {version = "0.1.51", optional = true} @@ -152,14 +152,14 @@ tokio = {workspace = true, features = ["full"]} wasmtimer = "0.2.0" [dev-dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "testing", "ibc-mocks"]} +namada_core = {path = "../core", default-features = false, features = ["testing", "ibc-mocks"]} namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false, features = ["testing"]} namada_test_utils = {path = "../test_utils"} assert_matches.workspace = true async-trait.workspace = true base58.workspace = true byte-unit.workspace = true -libsecp256k1.workspace = true +k256.workspace = true pretty_assertions.workspace = true proptest.workspace = true tempfile.workspace = true diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 5dbe91c1e4..b719ee1a52 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -1447,6 +1447,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1513,6 +1514,7 @@ dependencies = [ "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 
2.4.1", "zeroize", ] @@ -2523,17 +2525,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ -2939,6 +2930,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -2995,57 +2987,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "linux-raw-sys" version = "0.3.7" @@ -3393,7 
+3341,7 @@ dependencies = [ "impl-num-traits", "index-set", "itertools", - "libsecp256k1 0.7.0", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -4883,6 +4831,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -5031,6 +4980,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5553,7 +5512,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ "base58", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 8e3bc2bb20..ba1bb4ba7f 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1447,6 +1447,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1513,6 +1514,7 @@ dependencies = [ "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -2523,17 +2525,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ -2939,6 +2930,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -2995,57 +2987,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = 
"libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "linux-raw-sys" version = "0.3.7" @@ -3393,7 +3341,7 @@ dependencies = [ "impl-num-traits", "index-set", "itertools", - "libsecp256k1 0.7.0", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -4876,6 +4824,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -5024,6 +4973,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5546,7 +5505,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ 
"base58", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] From 7e4e922cfbbf08630462de0774424013abb3983d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 29 Sep 2023 13:59:54 +0100 Subject: [PATCH 059/161] changelog: add #1958 --- .changelog/unreleased/miscellaneous/1958-k256.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/miscellaneous/1958-k256.md diff --git a/.changelog/unreleased/miscellaneous/1958-k256.md b/.changelog/unreleased/miscellaneous/1958-k256.md new file mode 100644 index 0000000000..82a38bb863 --- /dev/null +++ b/.changelog/unreleased/miscellaneous/1958-k256.md @@ -0,0 +1,2 @@ +- Switched from using `libsecp256k1` to `k256` crate. + ([\#1958](https://github.com/anoma/namada/pull/1958)) \ No newline at end of file From 609e70d62ce56842633b85aab5958d0324122416 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 19:26:47 +0100 Subject: [PATCH 060/161] Implement ZeroizeOnDrop on ed25519 keys --- core/src/types/key/ed25519.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/core/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs index faf6076ea2..12863cca09 100644 --- a/core/src/types/key/ed25519.rs +++ b/core/src/types/key/ed25519.rs @@ -10,7 +10,7 @@ use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; -use zeroize::Zeroize; +use zeroize::{Zeroize, ZeroizeOnDrop}; use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, @@ -125,7 +125,7 @@ impl FromStr for PublicKey { } /// Ed25519 secret key -#[derive(Debug, Serialize, Deserialize, Zeroize)] +#[derive(Debug, Serialize, Deserialize, Zeroize, ZeroizeOnDrop)] pub struct SecretKey(pub Box); impl super::SecretKey for SecretKey { @@ -223,12 +223,6 @@ impl FromStr for SecretKey { } } -impl Drop for SecretKey { - fn drop(&mut self) { - self.0.zeroize(); - } -} 
- /// Ed25519 signature #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct Signature(pub ed25519_consensus::Signature); From 5d7c951b60d01c4f425b30a6ce53f944456038d5 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Wed, 4 Oct 2023 09:13:12 +0100 Subject: [PATCH 061/161] Changelog for #1956 --- .changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md diff --git a/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md b/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md new file mode 100644 index 0000000000..f606d9d7ef --- /dev/null +++ b/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md @@ -0,0 +1,2 @@ +- Tag `ed25519` keys with `ZeroizeOnDrop` + ([\#1958](https://github.com/anoma/namada/pull/1958)) \ No newline at end of file From 2a141f128ba1c8cf5c7988d45952c8126f7adef2 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 14 Sep 2023 19:35:36 +0100 Subject: [PATCH 062/161] Update to upstream borsh v1.0.0-alpha.4 Co-Authored-By: mariari --- Cargo.toml | 9 +-------- test_utils/Cargo.toml | 2 +- wasm/Cargo.toml | 7 ------- wasm/tx_template/Cargo.toml | 2 +- wasm/vp_template/Cargo.toml | 2 +- wasm/wasm_source/Cargo.toml | 2 +- wasm_for_tests/wasm_source/Cargo.toml | 9 +-------- 7 files changed, 6 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9c731f0fdf..74b115f894 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ bit-set = "0.5.2" blake2b-rs = "0.2.0" byte-unit = "4.0.13" byteorder = "1.4.2" -borsh = "0.9.0" +borsh = {version = "1.0.0-alpha.4", features = ["schema", "derive"]} chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} circular-queue = "0.2.6" clap = "4.3.4" @@ -147,13 +147,6 @@ wasmparser = "0.107.0" winapi = "0.3.9" zeroize = {version = "1.5.5", features = ["zeroize_derive"]} -[patch.crates-io] -# 
TODO temp patch for , and more tba. -borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} - [profile.release] lto = true opt-level = 3 diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 8e0a9c6719..3b81ee176e 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -14,5 +14,5 @@ version.workspace = true [dependencies] namada_core = { path = "../core", default-features = false, features = ["abciplus"] } -borsh = "0.9.0" +borsh.workspace = true strum = {version = "0.24", features = ["derive"]} diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml index 8453ea48ca..c4cb182c1a 100644 --- a/wasm/Cargo.toml +++ b/wasm/Cargo.toml @@ -7,13 +7,6 @@ members = [ "vp_template", ] -[patch.crates-io] -# TODO temp patch for , and more tba. 
-borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} - [profile.release] # smaller and faster wasm (https://rustwasm.github.io/book/reference/code-size.html#compiling-with-link-time-optimizations-lto) lto = true diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 31865b3818..dbccfaba16 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -11,7 +11,7 @@ crate-type = ["cdylib"] [dependencies] namada_tx_prelude = {path = "../../tx_prelude"} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index 9566c6de63..3e8b64262d 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -11,7 +11,7 @@ crate-type = ["cdylib"] [dependencies] namada_vp_prelude = {path = "../../vp_prelude"} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 81f07ba049..93999dae48 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -38,7 +38,7 @@ vp_validator = ["namada_vp_prelude", "once_cell"] [dependencies] namada_tx_prelude = {path = "../../tx_prelude", optional = true} namada_vp_prelude = {path = "../../vp_prelude", optional = true} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" once_cell = {version = "1.8.0", optional = true} wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } 
diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index de8e3ce99a..07ae535c61 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -27,17 +27,10 @@ tx_proposal_code = [] namada_test_utils = {path = "../../test_utils"} namada_tx_prelude = {path = "../../tx_prelude"} namada_vp_prelude = {path = "../../vp_prelude"} -borsh = "0.9.1" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } -[patch.crates-io] -# TODO temp patch for , and more tba. -borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} - [dev-dependencies] namada_tests = {path = "../../tests"} From f542fa243bfa63b79448c5167b91807d00374045 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 15 Sep 2023 13:44:31 +0100 Subject: [PATCH 063/161] Add borsh-ext dependency Co-Authored-By: mariari --- Cargo.toml | 1 + apps/Cargo.toml | 1 + benches/Cargo.toml | 1 + core/Cargo.toml | 1 + ethereum_bridge/Cargo.toml | 1 + shared/Cargo.toml | 1 + tests/Cargo.toml | 1 + tx_prelude/Cargo.toml | 1 + vp_prelude/Cargo.toml | 1 + 9 files changed, 9 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 74b115f894..e72c2f4c21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,6 +53,7 @@ blake2b-rs = "0.2.0" byte-unit = "4.0.13" byteorder = "1.4.2" borsh = {version = "1.0.0-alpha.4", features = ["schema", "derive"]} +borsh-ext = {tag = "v1.0.0-alpha.4", git = "https://github.com/heliaxdev/borsh-ext"} chrono = {version = "0.4.22", default-features = false, features = 
["clock", "std"]} circular-queue = "0.2.6" clap = "4.3.4" diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 1d33f55df7..ed1c562a30 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -77,6 +77,7 @@ bech32.workspace = true bimap.workspace = true blake2b-rs.workspace = true borsh.workspace = true +borsh-ext.workspace = true byte-unit.workspace = true byteorder.workspace = true clap.workspace = true diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 91a5d45333..9e03476063 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -44,6 +44,7 @@ path = "host_env.rs" [dependencies] async-trait.workspace = true borsh.workspace = true +borsh-ext.workspace = true ferveo-common.workspace = true masp_primitives.workspace = true masp_proofs.workspace = true diff --git a/core/Cargo.toml b/core/Cargo.toml index ebbb35f383..8d75285dc9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -61,6 +61,7 @@ ark-serialize.workspace = true arse-merkle-tree.workspace = true bech32.workspace = true borsh.workspace = true +borsh-ext.workspace = true chrono.workspace = true data-encoding.workspace = true derivative.workspace = true diff --git a/ethereum_bridge/Cargo.toml b/ethereum_bridge/Cargo.toml index 1354c36a8c..c3d7dcd537 100644 --- a/ethereum_bridge/Cargo.toml +++ b/ethereum_bridge/Cargo.toml @@ -33,6 +33,7 @@ namada_core = {path = "../core", default-features = false, features = ["secp256k namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} borsh.workspace = true +borsh-ext.workspace = true ethers.workspace = true eyre.workspace = true itertools.workspace = true diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 3259afd2c7..e32c222eb8 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -99,6 +99,7 @@ namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} async-trait = {version = "0.1.51", optional = true} bimap.workspace = true borsh.workspace = true 
+borsh-ext.workspace = true circular-queue.workspace = true clru.workspace = true data-encoding.workspace = true diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 674bce9538..693494d682 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -58,6 +58,7 @@ tracing.workspace = true namada_apps = {path = "../apps", features = ["testing"]} assert_cmd.workspace = true borsh.workspace = true +borsh-ext.workspace = true color-eyre.workspace = true data-encoding.workspace = true # NOTE: enable "print" feature to see output from builds ran by e2e tests diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index 5fa5d24d68..981b9c0c79 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -26,6 +26,7 @@ namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_vm_env = {path = "../vm_env", default-features = false} borsh.workspace = true +borsh-ext.workspace = true masp_primitives.workspace = true sha2.workspace = true thiserror.workspace = true diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml index ecffd8eaa4..5384395b22 100644 --- a/vp_prelude/Cargo.toml +++ b/vp_prelude/Cargo.toml @@ -26,5 +26,6 @@ namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_vm_env = {path = "../vm_env", default-features = false} borsh.workspace = true +borsh-ext.workspace = true sha2.workspace = true thiserror.workspace = true From b5076f62fddf0d447120eb276dcc37b6a20b3cb3 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 15 Sep 2023 13:50:14 +0100 Subject: [PATCH 064/161] Update color-eyre Co-Authored-By: mariari --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e72c2f4c21..a315d41563 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ chrono = {version = "0.4.22", default-features = false, features = ["clock", "st circular-queue = "0.2.6" clap = "4.3.4" 
clru = {git = "https://github.com/marmeladema/clru-rs.git", rev = "71ca566"} -color-eyre = "0.5.10" +color-eyre = "0.6.2" concat-idents = "1.1.2" config = "0.11.0" data-encoding = "2.3.2" From e93e97e9060e69e7c2035782804404b77940fd6d Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 25 Sep 2023 10:58:31 +0100 Subject: [PATCH 065/161] Update borsh on heliaxdev deps Co-Authored-By: mariari --- Cargo.toml | 8 ++++---- wasm/wasm_source/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a315d41563..3fcf2a06b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,7 @@ ark-bls12-381 = {version = "0.3"} ark-serialize = {version = "0.3"} ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" -arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "e086b235ed6e68929bf73f617dd61cd17b000a56", default-features = false, features = ["std", "borsh"]} +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "df7ec062e7c40d5e76b136064e9aaf8bd2490750", default-features = false, features = ["std", "borsh"]} assert_cmd = "1.0.7" assert_matches = "1.5.0" async-trait = {version = "0.1.51"} @@ -84,15 +84,15 @@ fs_extra = "1.2.0" futures = "0.3" git2 = "0.13.25" ics23 = "0.9.0" -index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.7.1", features = ["serialize-borsh", "serialize-serde"]} +index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.8.0", features = ["serialize-borsh", "serialize-serde"]} itertools = "0.10.0" lazy_static = "1.4.0" libc = "0.2.97" libloading = "0.7.2" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = 
"50acc5028fbcd52a05970fe7991c7850ab04358e" } -masp_proofs = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", default-features = false, features = ["local-prover"] } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "449a7295fe24d96456ece24c223ca9eb76b0e6ba" } +masp_proofs = { git = "https://github.com/anoma/masp", rev = "449a7295fe24d96456ece24c223ca9eb76b0e6ba", default-features = false, features = ["local-prover"] } num256 = "0.3.5" num_cpus = "1.13.0" num-derive = "0.3.3" diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 93999dae48..82ccc18a31 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -43,7 +43,7 @@ once_cell = {version = "1.8.0", optional = true} wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", optional = true } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "449a7295fe24d96456ece24c223ca9eb76b0e6ba", optional = true } ripemd = "0.1" [dev-dependencies] From 7eb80b13f69ffefb606448f0689a5f263aa53c8e Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 15 Sep 2023 13:45:07 +0100 Subject: [PATCH 066/161] Fix compilation errors Co-Authored-By: mariari --- apps/src/lib/cli/wallet.rs | 6 +- apps/src/lib/client/rpc.rs | 8 +- apps/src/lib/client/tx.rs | 2 +- apps/src/lib/client/utils.rs | 13 +- apps/src/lib/config/genesis.rs | 12 +- .../lib/node/ledger/shell/finalize_block.rs | 22 +-- apps/src/lib/node/ledger/shell/init_chain.rs | 4 +- apps/src/lib/node/ledger/shell/mod.rs | 11 +- .../lib/node/ledger/shell/prepare_proposal.rs | 16 +- .../lib/node/ledger/shell/process_proposal.rs | 5 +- apps/src/lib/node/ledger/shell/queries.rs | 6 +- .../lib/node/ledger/shell/vote_extensions.rs | 2 +- .../shell/vote_extensions/bridge_pool_vext.rs | 24 +-- 
.../shell/vote_extensions/eth_events.rs | 18 +- .../shell/vote_extensions/val_set_update.rs | 9 +- apps/src/lib/node/ledger/storage/rocksdb.rs | 11 +- apps/src/lib/node/ledger/tendermint_node.rs | 17 +- apps/src/lib/wallet/cli_utils.rs | 6 +- benches/host_env.rs | 4 +- benches/lib.rs | 8 +- core/src/ledger/governance/cli/offline.rs | 26 +-- .../src/ledger/governance/storage/proposal.rs | 1 + core/src/ledger/ibc/context/common.rs | 10 +- core/src/ledger/ibc/context/execution.rs | 5 +- core/src/ledger/storage/masp_conversions.rs | 11 +- core/src/ledger/storage/merkle_tree.rs | 12 +- core/src/ledger/storage/mockdb.rs | 10 +- core/src/ledger/storage/mod.rs | 21 +-- core/src/ledger/storage/traits.rs | 5 +- core/src/ledger/storage/wl_storage.rs | 9 +- core/src/ledger/storage_api/mod.rs | 3 +- core/src/proto/types.rs | 145 ++++++++-------- core/src/types/address.rs | 7 +- core/src/types/chain.rs | 2 +- core/src/types/eth_bridge_pool.rs | 7 +- core/src/types/ethereum_events.rs | 3 +- core/src/types/ethereum_structs.rs | 5 +- core/src/types/key/common.rs | 24 +-- core/src/types/key/dkg_session_keys.rs | 18 +- core/src/types/key/ed25519.rs | 55 +++--- core/src/types/key/mod.rs | 15 +- core/src/types/key/secp256k1.rs | 37 +++-- core/src/types/masp.rs | 5 +- core/src/types/storage.rs | 9 +- core/src/types/time.rs | 8 +- core/src/types/token.rs | 1 + core/src/types/transaction/decrypted.rs | 5 +- core/src/types/transaction/encrypted.rs | 8 +- core/src/types/transaction/mod.rs | 3 +- core/src/types/transaction/protocol.rs | 24 ++- core/src/types/transaction/wrapper.rs | 14 +- .../vote_extensions/bridge_pool_roots.rs | 1 + core/src/types/voting_power.rs | 24 ++- encoding_spec/src/main.rs | 157 ++++++++++-------- ethereum_bridge/src/parameters.rs | 3 +- .../transactions/bridge_pool_roots.rs | 11 +- .../transactions/ethereum_events/events.rs | 31 +--- .../src/protocol/transactions/read.rs | 4 +- .../src/protocol/transactions/update.rs | 11 +- 
.../protocol/transactions/votes/storage.rs | 49 +++--- ethereum_bridge/src/storage/vote_tallies.rs | 10 +- ethereum_bridge/src/test_utils.rs | 4 +- ethereum_bridge/src/vp.rs | 9 +- proof_of_stake/src/pos_queries.rs | 7 +- shared/src/ledger/eth_bridge/bridge_pool.rs | 4 +- .../ethereum_bridge/bridge_pool_vp.rs | 127 +++++--------- .../ledger/native_vp/ethereum_bridge/nut.rs | 6 +- .../ledger/native_vp/ethereum_bridge/vp.rs | 23 +-- shared/src/ledger/native_vp/ibc/context.rs | 36 +--- shared/src/ledger/native_vp/ibc/mod.rs | 18 +- shared/src/ledger/native_vp/multitoken.rs | 40 ++--- shared/src/ledger/protocol/mod.rs | 12 +- shared/src/ledger/queries/router.rs | 9 +- shared/src/ledger/queries/shell.rs | 12 +- shared/src/ledger/queries/shell/eth_bridge.rs | 60 +++---- shared/src/sdk/masp.rs | 42 ++--- shared/src/sdk/signing.rs | 7 +- shared/src/sdk/tx.rs | 4 +- shared/src/sdk/wallet/keys.rs | 5 +- shared/src/vm/host_env.rs | 31 ++-- shared/src/vm/wasm/memory.rs | 14 +- shared/src/vm/wasm/run.rs | 50 +++--- tests/src/e2e/eth_bridge_tests/helpers.rs | 2 +- tests/src/e2e/ledger_tests.rs | 7 +- tests/src/e2e/multitoken_tests/helpers.rs | 4 +- tests/src/native_vp/eth_bridge_pool.rs | 5 +- tests/src/vm_host_env/ibc.rs | 8 +- tests/src/vm_host_env/mod.rs | 24 ++- tests/src/vm_host_env/tx.rs | 7 +- tx_prelude/src/lib.rs | 6 +- vp_prelude/src/lib.rs | 12 +- wasm/wasm_source/src/tx_bond.rs | 3 +- wasm/wasm_source/src/tx_bridge_pool.rs | 5 +- .../src/tx_change_validator_commission.rs | 3 +- wasm/wasm_source/src/tx_unbond.rs | 3 +- wasm/wasm_source/src/tx_withdraw.rs | 3 +- wasm/wasm_source/src/vp_masp.rs | 12 +- wasm/wasm_source/src/vp_testnet_faucet.rs | 3 +- 98 files changed, 725 insertions(+), 915 deletions(-) diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index d039c7ef10..9cfa5d3fd3 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -3,7 +3,7 @@ use std::fs::File; use std::io::{self, Write}; -use borsh::BorshSerialize; +use 
borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; @@ -535,9 +535,7 @@ fn key_export( wallet .find_key(alias.to_lowercase(), None) .map(|keypair| { - let file_data = keypair - .try_to_vec() - .expect("Encoding keypair shouldn't fail"); + let file_data = keypair.serialize_to_vec(); let file_name = format!("key_{}", alias.to_lowercase()); let mut file = File::create(&file_name).unwrap(); diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..9a61fc496a 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -7,7 +7,8 @@ use std::io; use std::iter::Iterator; use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use itertools::Either; use masp_primitives::asset_type::AssetType; @@ -868,10 +869,7 @@ pub async fn query_shielded_balance< // Compute the unique asset identifier from the token address let token = token; let _asset_type = AssetType::new( - (token.clone(), epoch.0) - .try_to_vec() - .expect("token addresses should serialize") - .as_ref(), + (token.clone(), epoch.0).serialize_to_vec().as_ref(), ) .unwrap(); let token_alias = wallet.lookup_alias(&token); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..13e5ba397d 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -599,7 +599,7 @@ const TMP_FILE_NAME: &str = "shielded.tmp"; #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] pub struct CLIShieldedUtils { - #[borsh_skip] + #[borsh(skip)] context_dir: PathBuf, } diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 0caab25d35..9d5f17d3e3 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -5,7 +5,7 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::str::FromStr; -use borsh::BorshSerialize; +use 
borsh_ext::BorshSerializeExt; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; @@ -384,12 +384,12 @@ pub fn id_from_pk(pk: &common::PublicKey) -> TendermintNodeId { match pk { common::PublicKey::Ed25519(_) => { let _pk: ed25519::PublicKey = pk.try_to_pk().unwrap(); - let digest = Sha256::digest(_pk.try_to_vec().unwrap().as_slice()); + let digest = Sha256::digest(_pk.serialize_to_vec().as_slice()); bytes.copy_from_slice(&digest[..TENDERMINT_NODE_ID_LENGTH]); } common::PublicKey::Secp256k1(_) => { let _pk: secp256k1::PublicKey = pk.try_to_pk().unwrap(); - let digest = Sha256::digest(_pk.try_to_vec().unwrap().as_slice()); + let digest = Sha256::digest(_pk.serialize_to_vec().as_slice()); bytes.copy_from_slice(&digest[..TENDERMINT_NODE_ID_LENGTH]); } } @@ -705,7 +705,7 @@ pub fn init_network( // Generate the chain ID first let genesis = genesis_config::load_genesis_config(config_clean.clone()); - let genesis_bytes = genesis.try_to_vec().unwrap(); + let genesis_bytes = genesis.serialize_to_vec(); let chain_id = ChainId::from_genesis(chain_id_prefix, genesis_bytes); let chain_dir = global_args.base_dir.join(chain_id.as_str()); let genesis_path = global_args @@ -1126,12 +1126,11 @@ pub fn write_tendermint_node_key( // but does not for secp256k1. 
let (node_keypair, key_str) = match node_sk { common::SecretKey::Ed25519(sk) => ( - [sk.try_to_vec().unwrap(), sk.ref_to().try_to_vec().unwrap()] - .concat(), + [sk.serialize_to_vec(), sk.ref_to().serialize_to_vec()].concat(), "Ed25519", ), common::SecretKey::Secp256k1(sk) => { - (sk.try_to_vec().unwrap(), "Secp256k1") + (sk.serialize_to_vec(), "Secp256k1") } }; diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..5100b0bda5 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -729,7 +729,7 @@ pub mod genesis_config { } #[derive(Debug, BorshSerialize, BorshDeserialize)] -#[borsh_init(init)] +#[borsh(init=init)] pub struct Genesis { pub genesis_time: DateTimeUtc, pub native_token: Address, @@ -1136,7 +1136,7 @@ pub fn genesis(num_validators: u64) -> Genesis { #[cfg(test)] pub mod tests { - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::types::address::testing::gen_established_address; use namada::types::key::*; use rand::prelude::ThreadRng; @@ -1152,7 +1152,7 @@ pub mod tests { let mut rng: ThreadRng = thread_rng(); let keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); - let kp_arr = keypair.try_to_vec().unwrap(); + let kp_arr = keypair.serialize_to_vec(); let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); @@ -1169,14 +1169,14 @@ pub mod tests { println!("address: {}", address); println!("keypair: {:?}", kp_arr); println!("protocol_keypair: {:?}", protocol_keypair); - println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); + println!("dkg_keypair: {:?}", dkg_keypair.serialize_to_vec()); println!( "eth_cold_gov_keypair: {:?}", - eth_cold_gov_keypair.try_to_vec().unwrap() + eth_cold_gov_keypair.serialize_to_vec() ); println!( "eth_hot_bridge_keypair: {:?}", - eth_hot_bridge_keypair.try_to_vec().unwrap() + eth_hot_bridge_keypair.serialize_to_vec() ); } } diff --git 
a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..43f2effaf0 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1200,10 +1200,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // create some wrapper txs @@ -1385,10 +1382,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // create two decrypted txs @@ -1638,7 +1632,7 @@ mod test_finalize_block { &KeccakHash([1; 32]), 3.into(), ); - let value = BlockHeight(4).try_to_vec().expect("Test failed"); + let value = BlockHeight(4).serialize_to_vec(); shell .wl_storage .storage @@ -1649,10 +1643,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &get_nonce_key(), - Uint::from(1).try_to_vec().expect("Test failed"), - ) + .write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) .expect("Test failed"); let (tx, action) = craft_tx(&mut shell); let processed_tx = ProcessedTx { @@ -1828,10 +1819,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // Add a proposal to be executed on next epoch change. 
diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d6b2efe4dd..da29b58d70 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -53,7 +53,7 @@ where genesis::genesis(&self.base_dir, &self.wl_storage.storage.chain_id); #[cfg(not(any(test, feature = "dev")))] { - let genesis_bytes = genesis.try_to_vec().unwrap(); + let genesis_bytes = genesis.serialize_to_vec(); let errors = self.wl_storage.storage.chain_id.validate(genesis_bytes); use itertools::Itertools; @@ -210,7 +210,7 @@ where self.wl_storage .write_bytes( &namada::eth_bridge::storage::active_key(), - EthBridgeStatus::Disabled.try_to_vec().unwrap(), + EthBridgeStatus::Disabled.serialize_to_vec(), ) .unwrap(); } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..17e8f11362 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -26,7 +26,8 @@ use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use namada::core::hints; use namada::core::ledger::eth_bridge; @@ -93,10 +94,10 @@ fn key_to_tendermint( ) -> std::result::Result { match pk { common::PublicKey::Ed25519(_) => ed25519::PublicKey::try_from_pk(pk) - .map(|pk| public_key::Sum::Ed25519(pk.try_to_vec().unwrap())), + .map(|pk| public_key::Sum::Ed25519(pk.serialize_to_vec())), common::PublicKey::Secp256k1(_) => { secp256k1::PublicKey::try_from_pk(pk) - .map(|pk| public_key::Sum::Secp256k1(pk.try_to_vec().unwrap())) + .map(|pk| public_key::Sum::Secp256k1(pk.serialize_to_vec())) } } } @@ -1973,7 +1974,7 @@ mod test_utils { .wl_storage .write_bytes( &active_key(), - EthBridgeStatus::Disabled.try_to_vec().expect("Test failed"), + EthBridgeStatus::Disabled.serialize_to_vec(), 
) .expect("Test failed"); } @@ -2307,7 +2308,7 @@ mod abciplus_mempool_tests { }))); // invalid tx type, it doesn't match the // tx type declared in the header - tx.set_data(Data::new(ext.try_to_vec().expect("Test falied"))); + tx.set_data(Data::new(ext.serialize_to_vec())); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), [(0, protocol_key)].into_iter().collect(), diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..61dc786372 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -493,7 +493,7 @@ mod test_prepare_proposal { #[cfg(feature = "abcipp")] use std::collections::{BTreeSet, HashMap}; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; @@ -584,8 +584,7 @@ mod test_prepare_proposal { bridge_pool_root: Some(bp_root), validator_set_update: None, } - .try_to_vec() - .expect("Test failed"); + .serialize_to_vec(); let vote = ExtendedVoteInfo { vote_extension, @@ -1027,7 +1026,7 @@ mod test_prepare_proposal { validator_set_update: None, }; let vote = ExtendedVoteInfo { - vote_extension: vote_extension.try_to_vec().unwrap(), + vote_extension: vote_extension.serialize_to_vec(), ..Default::default() }; // this should panic @@ -1082,10 +1081,7 @@ mod test_prepare_proposal { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1_000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1_000).serialize_to_vec()) .unwrap(); let mut req = RequestPrepareProposal { @@ -1150,11 +1146,11 @@ mod test_prepare_proposal { assert_eq!( received .iter() - .map(|x| x.try_to_vec().unwrap()) + .map(|x| x.serialize_to_vec()) .collect::>(), expected_txs .iter() - .map(|x| x.try_to_vec().unwrap()) + .map(|x| x.serialize_to_vec()) .collect::>(), ); } diff --git 
a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..8706b3e702 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2180,10 +2180,7 @@ mod test_process_proposal { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); let mut wrapper = diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index a62c3ec4b4..0e8bfc5f55 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -1,6 +1,6 @@ //! Shell methods for querying state -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use ferveo_common::TendermintValidator; use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; @@ -84,9 +84,7 @@ where &self, pk: &common::PublicKey, ) -> Option> { - let pk_bytes = pk - .try_to_vec() - .expect("Serializing public key should not fail"); + let pk_bytes = pk.serialize_to_vec(); // get the current epoch let (current_epoch, _) = self.wl_storage.storage.get_current_epoch(); diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs index 658c35a121..ab6d05a026 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -87,7 +87,7 @@ where _req: request::ExtendVote, ) -> response::ExtendVote { response::ExtendVote { - vote_extension: self.craft_extension().try_to_vec().unwrap(), + vote_extension: self.craft_extension().serialize_to_vec(), } } diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 002bd18904..00f2d8c708 100644 --- 
a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -270,7 +270,7 @@ where mod test_bp_vote_extensions { #[cfg(feature = "abcipp")] use borsh::BorshDeserialize; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; #[cfg(not(feature = "abcipp"))] use namada::core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; #[cfg(not(feature = "abcipp"))] @@ -325,13 +325,7 @@ mod test_bp_vote_extensions { let pk_key = protocol_pk_key(&bertha_address()); shell .wl_storage - .write_bytes( - &pk_key, - bertha_keypair() - .ref_to() - .try_to_vec() - .expect("Test failed."), - ) + .write_bytes(&pk_key, bertha_keypair().ref_to().serialize_to_vec()) .expect("Test failed."); // change pipeline length to 1 @@ -481,7 +475,7 @@ mod test_bp_vote_extensions { .as_bytes() .to_vec(), height: 0, - vote_extension: vote_extension.try_to_vec().expect("Test failed"), + vote_extension: vote_extension.serialize_to_vec(), }; let res = shell.verify_vote_extension(req); assert_eq!(res.status, i32::from(VerifyStatus::Accept)); @@ -692,8 +686,7 @@ mod test_bp_vote_extensions { let address = shell.mode.get_validator_address().unwrap().clone(); shell.wl_storage.storage.block.height = 4.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -719,8 +712,7 @@ mod test_bp_vote_extensions { .delete(&key) .expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -780,8 +772,7 @@ mod test_bp_vote_extensions { let address = shell.mode.get_validator_address().unwrap().clone(); shell.wl_storage.storage.block.height = 
4.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -807,8 +798,7 @@ mod test_bp_vote_extensions { .delete(&key) .expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 0dd85bfd70..2c1274cb77 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -411,7 +411,7 @@ mod test_vote_extensions { #[cfg(feature = "abcipp")] use borsh::BorshDeserialize; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; @@ -461,7 +461,7 @@ mod test_vote_extensions { shell .wl_storage .storage - .write(&bridge_pool::get_nonce_key(), nonce.try_to_vec().unwrap()) + .write(&bridge_pool::get_nonce_key(), nonce.serialize_to_vec()) .expect("Test failed"); // write nam nonce to the eth events queue @@ -642,7 +642,7 @@ mod test_vote_extensions { .as_bytes() .to_vec(), height: 1, - vote_extension: vote_extension.try_to_vec().expect("Test failed"), + vote_extension: vote_extension.serialize_to_vec(), }; let res = shell.verify_vote_extension(req); assert_eq!(res.status, i32::from(VerifyStatus::Accept)); @@ -720,8 +720,7 @@ mod test_vote_extensions { }, validator_set_update: None, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), }; #[cfg(feature = "abcipp")] assert_eq!( @@ -899,15 +898,14 @@ mod test_vote_extensions { }; let req = request::VerifyVoteExtension { hash: vec![], - 
validator_address: address.try_to_vec().expect("Test failed"), + validator_address: address.serialize_to_vec(), height: 0, vote_extension: VoteExtension { ethereum_events: Some(signed_vext), bridge_pool_root: Some(bp_root), validator_set_update: None, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), }; assert_eq!( @@ -952,9 +950,9 @@ mod test_vote_extensions { #[cfg(feature = "abcipp")] let req = request::VerifyVoteExtension { hash: vec![], - validator_address: address.try_to_vec().expect("Test failed"), + validator_address: address.serialize_to_vec(), height: 0, - vote_extension: vote_ext.try_to_vec().expect("Test failed"), + vote_extension: vote_ext.serialize_to_vec(), }; #[cfg(feature = "abcipp")] assert_eq!( diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 03843b4717..8d6a6baae8 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -419,8 +419,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; @@ -506,8 +505,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; assert_eq!( @@ -715,8 +713,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update: validator_set_update.clone(), } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; assert_eq!( diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 61eb2c32e5..1e2d74fbf3 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -40,7 +40,8 @@ use std::str::FromStr; use std::sync::Mutex; 
use ark_serialize::Write; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use namada::core::types::ethereum_structs; use namada::ledger::storage::types::PrefixIterator; @@ -903,11 +904,9 @@ impl DB for RocksDB { let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; - batch.0.put_cf( - block_cf, - key.to_string(), - h.try_to_vec().expect("serialization failed"), - ); + batch + .0 + .put_cf(block_cf, key.to_string(), h.serialize_to_vec()); } } // Block hash diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index 2bd5168ffa..0833f7c3a7 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -3,7 +3,7 @@ use std::path::{Path, PathBuf}; use std::process::Stdio; use std::str::FromStr; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada::types::chain::ChainId; use namada::types::key::*; use namada::types::storage::BlockHeight; @@ -24,7 +24,6 @@ use crate::facade::tendermint::{block, Genesis}; use crate::facade::tendermint_config::{ Error as TendermintError, TendermintConfig, }; - /// Env. 
var to output Tendermint log to stdout pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_CMT_STDOUT"; @@ -243,19 +242,17 @@ fn validator_key_to_json( let (id_str, pk_arr, kp_arr) = match sk { common::SecretKey::Ed25519(_) => { let sk_ed: ed25519::SecretKey = sk.try_to_sk().unwrap(); - let keypair = [ - sk_ed.try_to_vec().unwrap(), - sk_ed.ref_to().try_to_vec().unwrap(), - ] - .concat(); - ("Ed25519", sk_ed.ref_to().try_to_vec().unwrap(), keypair) + let keypair = + [sk_ed.serialize_to_vec(), sk_ed.ref_to().serialize_to_vec()] + .concat(); + ("Ed25519", sk_ed.ref_to().serialize_to_vec(), keypair) } common::SecretKey::Secp256k1(_) => { let sk_sec: secp256k1::SecretKey = sk.try_to_sk().unwrap(); ( "Secp256k1", - sk_sec.ref_to().try_to_vec().unwrap(), - sk_sec.try_to_vec().unwrap(), + sk_sec.ref_to().serialize_to_vec(), + sk_sec.serialize_to_vec(), ) } }; diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index 72bb0acaab..f96803591a 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -1,7 +1,7 @@ use std::fs::File; use std::io::{self, Write}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; use namada::sdk::masp::find_valid_diversifier; @@ -426,9 +426,7 @@ pub fn key_export(ctx: Context, args::KeyExport { alias }: args::KeyExport) { wallet .find_key(alias.to_lowercase(), None) .map(|keypair| { - let file_data = keypair - .try_to_vec() - .expect("Encoding keypair shouldn't fail"); + let file_data = keypair.serialize_to_vec(); let file_name = format!("key_{}", alias.to_lowercase()); let mut file = File::create(&file_name).unwrap(); diff --git a/benches/host_env.rs b/benches/host_env.rs index 6f385b93bc..1c611f4e18 100644 --- a/benches/host_env.rs +++ b/benches/host_env.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use criterion::{criterion_group, 
criterion_main, Criterion}; use namada::core::types::account::AccountPublicKeysMap; use namada::core::types::address; @@ -19,7 +19,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { key: None, shielded: None, }; - let section = Section::Data(Data::new(transfer_data.try_to_vec().unwrap())); + let section = Section::Data(Data::new(transfer_data.serialize_to_vec())); let section_hash = section.get_hash(); let pkim = AccountPublicKeysMap::from_iter([ diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..fb8f8b350b 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -21,6 +21,7 @@ use std::path::PathBuf; use std::sync::Once; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; use masp_proofs::prover::LocalTxProver; @@ -436,7 +437,7 @@ pub fn generate_tx( WASM_DIR, wasm_code_path, ))); - tx.set_data(Data::new(data.try_to_vec().unwrap())); + tx.set_data(Data::new(data.serialize_to_vec())); if let Some(transaction) = shielded { tx.add_section(Section::MaspTx(transaction)); @@ -491,8 +492,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { key: Key::from("bench_foreign_key".to_string().to_db_key()), value: vec![0; 64], } - .try_to_vec() - .unwrap(), + .serialize_to_vec(), )); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), @@ -563,7 +563,7 @@ impl Clone for WrapperTempDir { #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Default)] pub struct BenchShieldedUtils { - #[borsh_skip] + #[borsh(skip)] context_dir: WrapperTempDir, } diff --git a/core/src/ledger/governance/cli/offline.rs b/core/src/ledger/governance/cli/offline.rs index fb56a1270a..64b13e29db 100644 --- a/core/src/ledger/governance/cli/offline.rs +++ b/core/src/ledger/governance/cli/offline.rs @@ -3,6 +3,7 @@ use std::fs::{File, ReadDir}; use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; +use 
borsh_ext::BorshSerializeExt; use serde::{Deserialize, Serialize}; use super::onchain::ProposalVote; @@ -214,14 +215,8 @@ impl OfflineVote { keypairs: Vec, account_public_keys_map: &AccountPublicKeysMap, ) -> Self { - let proposal_vote_data = self - .vote - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let delegations_hash = self - .delegations - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); + let proposal_vote_data = self.vote.serialize_to_vec(); + let delegations_hash = self.delegations.serialize_to_vec(); let vote_hash = Hash::sha256( [ @@ -248,18 +243,9 @@ impl OfflineVote { /// compute the hash of a proposal pub fn compute_hash(&self) -> Hash { - let proposal_hash_data = self - .proposal_hash - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let proposal_vote_data = self - .vote - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let delegations_hash = self - .delegations - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); + let proposal_hash_data = self.proposal_hash.serialize_to_vec(); + let proposal_vote_data = self.vote.serialize_to_vec(); + let delegations_hash = self.delegations.serialize_to_vec(); let vote_serialized = &[proposal_hash_data, proposal_vote_data, delegations_hash] .concat(); diff --git a/core/src/ledger/governance/storage/proposal.rs b/core/src/ledger/governance/storage/proposal.rs index 72a15b8631..c4a59389ef 100644 --- a/core/src/ledger/governance/storage/proposal.rs +++ b/core/src/ledger/governance/storage/proposal.rs @@ -55,6 +55,7 @@ impl StoragePgfFunding { PartialEq, Eq, PartialOrd, + Ord, BorshSerialize, BorshDeserialize, Serialize, diff --git a/core/src/ledger/ibc/context/common.rs b/core/src/ledger/ibc/context/common.rs index 5e963e7a5f..a9733b18ac 100644 --- a/core/src/ledger/ibc/context/common.rs +++ b/core/src/ledger/ibc/context/common.rs @@ -1,6 +1,7 @@ //! 
IbcCommonContext implementation for IBC -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use prost::Message; use sha2::Digest; @@ -374,10 +375,7 @@ pub trait IbcCommonContext: IbcStorageContext { }) })?; if !has_key { - let bytes = denom - .as_ref() - .try_to_vec() - .expect("encoding shouldn't fail"); + let bytes = denom.as_ref().serialize_to_vec(); self.write(&key, bytes).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( @@ -434,7 +432,7 @@ pub trait IbcCommonContext: IbcStorageContext { if !has_key { // IBC denomination should be zero for U256 let denom = token::Denomination::from(0); - let bytes = denom.try_to_vec().expect("encoding shouldn't fail"); + let bytes = denom.serialize_to_vec(); self.write(&key, bytes).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( diff --git a/core/src/ledger/ibc/context/execution.rs b/core/src/ledger/ibc/context/execution.rs index ec0708ce2a..0160cb1d29 100644 --- a/core/src/ledger/ibc/context/execution.rs +++ b/core/src/ledger/ibc/context/execution.rs @@ -1,6 +1,7 @@ //! ExecutionContext implementation for IBC -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use super::super::{IbcActions, IbcCommonContext}; use crate::ibc::core::events::IbcEvent; @@ -179,7 +180,7 @@ where }))? 
} }; - let bytes = list.try_to_vec().expect("encoding shouldn't fail"); + let bytes = list.serialize_to_vec(); self.ctx.borrow_mut().write(&key, bytes).map_err(|_| { ContextError::ConnectionError(ConnectionError::Other { description: format!( diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 624fe2aa1f..a9f6c9342d 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -3,6 +3,7 @@ use std::collections::BTreeMap; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::convert::AllowedConversion; use masp_primitives::merkle_tree::FrozenCommitmentTree; @@ -203,11 +204,7 @@ where .into_storage_result()?; // We cannot borrow `conversion_state` at the same time as when we call // `wl_storage.write`, so we encode it manually first - let conv_bytes = wl_storage - .storage - .conversion_state - .try_to_vec() - .into_storage_result()?; + let conv_bytes = wl_storage.storage.conversion_state.serialize_to_vec(); wl_storage.write_bytes(&state_key, conv_bytes)?; Ok(()) } @@ -218,9 +215,7 @@ pub fn encode_asset_type( denom: MaspDenom, epoch: Epoch, ) -> AssetType { - let new_asset_bytes = (addr, denom, epoch.0) - .try_to_vec() - .expect("unable to serialize address and epoch"); + let new_asset_bytes = (addr, denom, epoch.0).serialize_to_vec(); AssetType::new(new_asset_bytes.as_ref()) .expect("unable to derive asset identifier") } diff --git a/core/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs index eb4e34e20b..961cbc3c35 100644 --- a/core/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -8,6 +8,7 @@ use arse_merkle_tree::{ Hash as SmtHash, Key as TreeKey, SparseMerkleTree as ArseMerkleTree, H256, }; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ics23::commitment_proof::Proof as 
Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; use thiserror::Error; @@ -152,13 +153,12 @@ impl<'a> StoreRef<'a> { /// Borsh Seriliaze the backing stores of our Merkle tree. pub fn encode(&self) -> Vec { match self { - Self::Base(store) => store.try_to_vec(), - Self::Account(store) => store.try_to_vec(), - Self::Ibc(store) => store.try_to_vec(), - Self::PoS(store) => store.try_to_vec(), - Self::BridgePool(store) => store.try_to_vec(), + Self::Base(store) => store.serialize_to_vec(), + Self::Account(store) => store.serialize_to_vec(), + Self::Ibc(store) => store.serialize_to_vec(), + Self::PoS(store) => store.serialize_to_vec(), + Self::BridgePool(store) => store.serialize_to_vec(), } - .expect("Serialization failed") } } diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index 971584e742..3412959a3b 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -6,7 +6,8 @@ use std::ops::Bound::{Excluded, Included}; use std::path::Path; use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use super::merkle_tree::{MerkleTreeStoresRead, StoreType}; use super::{ @@ -295,10 +296,9 @@ impl DB for MockDB { let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; - self.0.borrow_mut().insert( - key.to_string(), - h.try_to_vec().expect("serialization failed"), - ); + self.0 + .borrow_mut() + .insert(key.to_string(), h.serialize_to_vec()); } } // Block hash diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 81be7e48a6..845b2d6f4b 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -15,6 +15,7 @@ use std::cmp::Ordering; use std::format; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use merkle_tree::{ MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, }; @@ -664,8 
+665,7 @@ where if is_pending_transfer_key(key) { // The tree of the bright pool stores the current height for the // pending transfer - let height = - self.block.height.try_to_vec().expect("Encoding failed"); + let height = self.block.height.serialize_to_vec(); self.block.tree.update(key, height)?; } else if !is_replay_protection_key(key) { // Update the merkle tree for all but replay-protection entries @@ -801,10 +801,7 @@ where tree.update( &new_key, if is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) + target_height.serialize_to_vec() } else { new.1.clone() }, @@ -826,10 +823,7 @@ where tree.update( &new_key, if is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) + target_height.serialize_to_vec() } else { new.1.clone() }, @@ -857,9 +851,7 @@ where tree.update( &key, if is_pending_transfer_key(&key) { - target_height.try_to_vec().expect( - "Serialization should never fail", - ) + target_height.serialize_to_vec() } else { new.1.clone() }, @@ -1055,8 +1047,7 @@ where if is_pending_transfer_key(key) { // The tree of the bright pool stores the current height for the // pending transfer - let height = - self.block.height.try_to_vec().expect("Encoding failed"); + let height = self.block.height.serialize_to_vec(); self.block.tree.update(key, height)?; } else if !is_replay_protection_key(key) { // Update the merkle tree for all but replay-protection entries diff --git a/core/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs index 2892110480..40b094e411 100644 --- a/core/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -5,7 +5,8 @@ use std::fmt; use arse_merkle_tree::traits::{Hasher, Value}; use arse_merkle_tree::{Key as TreeKey, H256}; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use ics23::commitment_proof::Proof as Ics23Proof; use 
ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; @@ -199,7 +200,7 @@ impl<'a> SubTreeRead for &'a BridgePoolTree { fn subtree_get(&self, key: &Key) -> Result, Error> { match self.get(key) { - Ok(height) => Ok(height.try_to_vec().expect("Encoding failed")), + Ok(height) => Ok(height.serialize_to_vec()), Err(err) => Err(Error::MerkleTree(err.to_string())), } } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 87107a35c9..e55f9be033 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -504,7 +504,8 @@ where mod tests { use std::collections::BTreeMap; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use proptest::prelude::*; use proptest::test_runner::Config; // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to @@ -638,16 +639,16 @@ mod tests { | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { } Level::TxWriteLog(WlMod::Write(val)) => { - s.write_log.write(key, val.try_to_vec().unwrap()).unwrap(); + s.write_log.write(key, val.serialize_to_vec()).unwrap(); } Level::BlockWriteLog(WlMod::Write(val)) => { s.write_log // protocol only writes at block level - .protocol_write(key, val.try_to_vec().unwrap()) + .protocol_write(key, val.serialize_to_vec()) .unwrap(); } Level::Storage(val) => { - s.storage.write(key, val.try_to_vec().unwrap()).unwrap(); + s.storage.write(key, val.serialize_to_vec()).unwrap(); } } } diff --git a/core/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs index 1108c44e3d..3ec75843b1 100644 --- a/core/src/ledger/storage_api/mod.rs +++ b/core/src/ledger/storage_api/mod.rs @@ -11,6 +11,7 @@ pub mod token; pub mod validation; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; use crate::types::address::Address; @@ -109,7 +110,7 @@ pub 
trait StorageWrite { key: &storage::Key, val: T, ) -> Result<()> { - let bytes = val.try_to_vec().into_storage_result()?; + let bytes = val.serialize_to_vec(); self.write_bytes(key, bytes) } diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index a6082fbbab..186fe4eaf6 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -3,14 +3,17 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryFrom; use std::hash::{Hash, Hasher}; +#[cfg(feature = "ferveo-tpke")] +use std::io::Read; use std::marker::PhantomData; #[cfg(feature = "ferveo-tpke")] use ark_ec::AffineCurve; #[cfg(feature = "ferveo-tpke")] use ark_ec::PairingEngine; -use borsh::schema::{Declaration, Definition}; +use borsh::schema::{add_definition, Declaration, Definition}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; @@ -129,8 +132,7 @@ impl Signable for SerializeWithBorsh { type Output = Vec; fn as_signable(data: &T) -> Vec { - data.try_to_vec() - .expect("Encoding data for signing shouldn't fail") + data.serialize_to_vec() } } @@ -183,17 +185,22 @@ impl PartialOrd for Signed { self.data.partial_cmp(&other.data) } } +impl Ord for Signed { + fn cmp(&self, other: &Self) -> Ordering { + self.data.cmp(&other.data) + } +} impl BorshSchema for Signed { fn add_definitions_recursively( - definitions: &mut HashMap, + definitions: &mut BTreeMap, ) { - let fields = borsh::schema::Fields::NamedFields(borsh::maybestd::vec![ + let fields = borsh::schema::Fields::NamedFields(vec![ ("data".to_string(), T::declaration()), - ("sig".to_string(), ::declaration()) + ("sig".to_string(), ::declaration()), ]); let definition = borsh::schema::Definition::Struct { fields }; - Self::add_definition(Self::declaration(), definition, definitions); + 
add_definition(Self::declaration(), definition, definitions); T::add_definitions_recursively(definitions); ::add_definitions_recursively(definitions); } @@ -265,9 +272,7 @@ impl Data { /// Hash this data section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize data section"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -410,8 +415,7 @@ impl SignatureIndex { } pub fn serialize(&self) -> String { - let signature_bytes = - self.try_to_vec().expect("Signature should be serializable"); + let signature_bytes = self.serialize_to_vec(); HEXUPPER.encode(&signature_bytes) } @@ -524,10 +528,7 @@ impl Signature { /// Hash this signature section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec() - .expect("unable to serialize multisignature section"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -663,8 +664,7 @@ impl Ciphertext { #[cfg(feature = "ferveo-tpke")] pub fn new(sections: Vec
, pubkey: &EncryptionKey) -> Self { let mut rng = rand::thread_rng(); - let bytes = - sections.try_to_vec().expect("unable to serialize sections"); + let bytes = sections.serialize_to_vec(); Self { ciphertext: tpke::encrypt(&bytes, pubkey.0, &mut rng), } @@ -683,9 +683,7 @@ impl Ciphertext { /// Get the hash of this ciphertext section. This operation is done in such /// a way it matches the hash of the type pun pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize decrypted tx"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -725,34 +723,42 @@ impl borsh::ser::BorshSerialize for Ciphertext { #[cfg(feature = "ferveo-tpke")] impl borsh::BorshDeserialize for Ciphertext { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - type VecTuple = (u32, Vec, Vec, Vec); - let (_length, nonce, ciphertext, auth_tag): VecTuple = - BorshDeserialize::deserialize(buf)?; - Ok(Self { - ciphertext: tpke::Ciphertext { - nonce: ark_serialize::CanonicalDeserialize::deserialize( - &*nonce, - ) - .map_err(|err| { - std::io::Error::new(std::io::ErrorKind::InvalidData, err) - })?, - ciphertext, - auth_tag: ark_serialize::CanonicalDeserialize::deserialize( - &*auth_tag, - ) - .map_err(|err| { - std::io::Error::new(std::io::ErrorKind::InvalidData, err) - })?, - }, - }) + fn deserialize_reader(reader: &mut R) -> std::io::Result { + { + type VecTuple = (u32, Vec, Vec, Vec); + let (_length, nonce, ciphertext, auth_tag): VecTuple = + BorshDeserialize::deserialize_reader(reader)?; + Ok(Self { + ciphertext: tpke::Ciphertext { + nonce: ark_serialize::CanonicalDeserialize::deserialize( + &*nonce, + ) + .map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + err, + ) + })?, + ciphertext, + auth_tag: ark_serialize::CanonicalDeserialize::deserialize( + &*auth_tag, + ) + .map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + err, + ) + })?, + }, + }) + } } } 
#[cfg(feature = "ferveo-tpke")] impl borsh::BorshSchema for Ciphertext { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -785,9 +791,7 @@ struct SerializedCiphertext { impl From for SerializedCiphertext { fn from(tx: Ciphertext) -> Self { SerializedCiphertext { - payload: tx - .try_to_vec() - .expect("Unable to serialize encrypted transaction"), + payload: tx.serialize_to_vec(), } } } @@ -824,7 +828,7 @@ where T: From>, T: serde::Serialize, { - Into::::into(obj.try_to_vec().unwrap()).serialize(ser) + Into::::into(obj.serialize_to_vec()).serialize(ser) } fn serde_borsh<'de, T, S, U>(ser: S) -> std::result::Result @@ -901,16 +905,14 @@ impl MaspBuilder { /// Get the hash of this ciphertext section. This operation is done in such /// a way it matches the hash of the type pun pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize MASP builder"), - ); + hasher.update(self.serialize_to_vec()); hasher } } impl borsh::BorshSchema for MaspBuilder { fn add_definitions_recursively( - _definitions: &mut std::collections::HashMap< + _definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -962,8 +964,7 @@ impl Section { /// allowing transaction sections to cross reference. pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { // Get the index corresponding to this variant - let discriminant = - self.try_to_vec().expect("sections should serialize")[0]; + let discriminant = self.serialize_to_vec()[0]; // Use Borsh's discriminant in the Section's hash hasher.update([discriminant]); match self { @@ -1111,10 +1112,7 @@ impl Header { /// Get the hash of this transaction header. 
pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec() - .expect("unable to serialize transaction header"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -1221,9 +1219,7 @@ impl Tx { /// Serialize tx to hex string pub fn serialize(&self) -> String { - let tx_bytes = self - .try_to_vec() - .expect("Transation should be serializable"); + let tx_bytes = self.serialize_to_vec(); HEXUPPER.encode(&tx_bytes) } @@ -1351,7 +1347,7 @@ impl Tx { pub fn to_bytes(&self) -> Vec { let mut bytes = vec![]; let tx: types::Tx = types::Tx { - data: self.try_to_vec().expect("encoding a transaction failed"), + data: self.serialize_to_vec(), }; tx.encode(&mut bytes) .expect("encoding a transaction failed"); @@ -1716,7 +1712,7 @@ impl Tx { /// Add wasm data to the tx builder pub fn add_data(&mut self, data: impl BorshSerialize) -> &mut Self { - let bytes = data.try_to_vec().expect("Encoding tx data shouldn't fail"); + let bytes = data.serialize_to_vec(); self.set_data(Data::new(bytes)); self } @@ -1987,16 +1983,13 @@ mod tests { // check that encryption doesn't do trivial things assert_ne!( encrypted.ciphertext.ciphertext, - plaintext.try_to_vec().expect("Test failed") + plaintext.serialize_to_vec() ); // decrypt the payload and check we got original data back let decrypted = encrypted.decrypt(privkey); assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } @@ -2014,7 +2007,7 @@ mod tests { ))]; let encrypted = Ciphertext::new(plaintext.clone(), &pubkey); // serialize via Borsh - let borsh = encrypted.try_to_vec().expect("Test failed"); + let borsh = encrypted.serialize_to_vec(); // deserialize again let new_encrypted: Ciphertext = BorshDeserialize::deserialize(&mut borsh.as_ref()) @@ -2022,11 +2015,8 @@ mod tests { // check that decryption works as 
expected let decrypted = new_encrypted.decrypt(privkey); assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } @@ -2051,11 +2041,8 @@ mod tests { let decrypted = new_encrypted.decrypt(privkey); // check that decryption works as expected assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } } diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 416b3f059e..2605168aad 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -9,6 +9,7 @@ use std::str::FromStr; use bech32::{self, FromBase32, ToBase32, Variant}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -494,9 +495,7 @@ impl EstablishedAddressGen { &mut self, rng_source: impl AsRef<[u8]>, ) -> Address { - let gen_bytes = self - .try_to_vec() - .expect("Encoding established addresses generator shouldn't fail"); + let gen_bytes = self.serialize_to_vec(); let bytes = [&gen_bytes, rng_source.as_ref()].concat(); let full_hash = Sha256::digest(&bytes); // take first 20 bytes of the hash @@ -753,7 +752,7 @@ pub mod tests { #[test] fn test_established_address_bytes_length(address in testing::arb_established_address()) { let address = Address::Established(address); - let bytes = address.try_to_vec().unwrap(); + let bytes = address.serialize_to_vec(); assert_eq!(bytes.len(), ESTABLISHED_ADDRESS_BYTES_LEN); } } diff --git a/core/src/types/chain.rs b/core/src/types/chain.rs index b14fdbbef2..43977d8812 100644 --- a/core/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -95,7 +95,7 @@ impl<'de> 
Deserialize<'de> for ProposalBytes { impl BorshSchema for ProposalBytes { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut std::collections::BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, diff --git a/core/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs index 0f1a887345..8e533ea262 100644 --- a/core/src/types/eth_bridge_pool.rs +++ b/core/src/types/eth_bridge_pool.rs @@ -4,6 +4,7 @@ use std::borrow::Cow; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ethabi::token::Token; use serde::{Deserialize, Serialize}; @@ -68,7 +69,7 @@ pub enum TransferToEthereumKind { Deserialize, BorshSerialize, BorshDeserialize, - BorshSchema, + /* BorshSchema, */ )] pub struct PendingTransferAppendix<'transfer> { /// The kind of the pending transfer to Ethereum. @@ -105,9 +106,7 @@ impl<'t> From<&'t PendingTransfer> for PendingTransferAppendix<'t> { impl<'transfer> PendingTransferAppendix<'transfer> { /// Calculate the checksum of this [`PendingTransferAppendix`]. pub fn checksum(&self) -> HashDigest { - let serialized = self - .try_to_vec() - .expect("Serializing a PendingTransferAppendix should not fail"); + let serialized = self.serialize_to_vec(); HashDigest::sha256(serialized) } } diff --git a/core/src/types/ethereum_events.rs b/core/src/types/ethereum_events.rs index 8dce0a39a4..2dc3601e5e 100644 --- a/core/src/types/ethereum_events.rs +++ b/core/src/types/ethereum_events.rs @@ -6,6 +6,7 @@ use std::ops::{Add, Sub}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ethabi::ethereum_types::{H160, U256 as ethUint}; use ethabi::Token; use eyre::{eyre, Context}; @@ -308,7 +309,7 @@ pub enum EthereumEvent { impl EthereumEvent { /// SHA256 of the Borsh serialization of the [`EthereumEvent`]. 
pub fn hash(&self) -> Result { - let bytes = self.try_to_vec()?; + let bytes = self.serialize_to_vec(); Ok(Hash::sha256(bytes)) } } diff --git a/core/src/types/ethereum_structs.rs b/core/src/types/ethereum_structs.rs index bccab79d65..f029edc4b6 100644 --- a/core/src/types/ethereum_structs.rs +++ b/core/src/types/ethereum_structs.rs @@ -1,5 +1,6 @@ //! Ethereum bridge struct re-exports and types to do with ethereum. use std::fmt; +use std::io::Read; use std::num::NonZeroU64; use std::ops::{Add, AddAssign, Deref}; @@ -96,8 +97,8 @@ impl BorshSerialize for BlockHeight { } impl BorshDeserialize for BlockHeight { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let be: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let be: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(Self(Uint256::from_bytes_be(&be))) } } diff --git a/core/src/types/key/common.rs b/core/src/types/key/common.rs index b3c4f3a52f..9ca0bdaffc 100644 --- a/core/src/types/key/common.rs +++ b/core/src/types/key/common.rs @@ -5,6 +5,7 @@ use std::fmt::Display; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; @@ -48,19 +49,19 @@ impl super::PublicKey for PublicKey { pk: &PK, ) -> Result { if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else if PK::TYPE == ed25519::PublicKey::TYPE { Ok(Self::Ed25519( ed25519::PublicKey::try_from_slice( - pk.try_to_vec().unwrap().as_slice(), + pk.serialize_to_vec().as_slice(), ) .map_err(ParsePublicKeyError::InvalidEncoding)?, )) } else if PK::TYPE == secp256k1::PublicKey::TYPE { Ok(Self::Secp256k1( secp256k1::PublicKey::try_from_slice( - pk.try_to_vec().unwrap().as_slice(), + 
pk.serialize_to_vec().as_slice(), ) .map_err(ParsePublicKeyError::InvalidEncoding)?, )) @@ -72,7 +73,7 @@ impl super::PublicKey for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.serialize_to_vec())) } } @@ -174,19 +175,19 @@ impl super::SecretKey for SecretKey { sk: &SK, ) -> Result { if SK::TYPE == Self::TYPE { - Self::try_from_slice(sk.try_to_vec().unwrap().as_ref()) + Self::try_from_slice(sk.serialize_to_vec().as_ref()) .map_err(ParseSecretKeyError::InvalidEncoding) } else if SK::TYPE == ed25519::SecretKey::TYPE { Ok(Self::Ed25519( ed25519::SecretKey::try_from_slice( - sk.try_to_vec().unwrap().as_ref(), + sk.serialize_to_vec().as_ref(), ) .map_err(ParseSecretKeyError::InvalidEncoding)?, )) } else if SK::TYPE == secp256k1::SecretKey::TYPE { Ok(Self::Secp256k1( secp256k1::SecretKey::try_from_slice( - sk.try_to_vec().unwrap().as_ref(), + sk.serialize_to_vec().as_ref(), ) .map_err(ParseSecretKeyError::InvalidEncoding)?, )) @@ -207,7 +208,7 @@ impl RefTo for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.serialize_to_vec())) } } @@ -230,6 +231,7 @@ impl FromStr for SecretKey { Eq, PartialEq, PartialOrd, + Ord, Hash, Serialize, Deserialize, @@ -263,19 +265,19 @@ impl super::Signature for Signature { sig: &SIG, ) -> Result { if SIG::TYPE == Self::TYPE { - Self::try_from_slice(sig.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(sig.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else if SIG::TYPE == ed25519::Signature::TYPE { Ok(Self::Ed25519( ed25519::Signature::try_from_slice( - sig.try_to_vec().unwrap().as_slice(), + sig.serialize_to_vec().as_slice(), ) .map_err(ParseSignatureError::InvalidEncoding)?, )) } 
else if SIG::TYPE == secp256k1::Signature::TYPE { Ok(Self::Secp256k1( secp256k1::Signature::try_from_slice( - sig.try_to_vec().unwrap().as_slice(), + sig.serialize_to_vec().as_slice(), ) .map_err(ParseSignatureError::InvalidEncoding)?, )) diff --git a/core/src/types/key/dkg_session_keys.rs b/core/src/types/key/dkg_session_keys.rs index f2cafb639c..ccca82aeba 100644 --- a/core/src/types/key/dkg_session_keys.rs +++ b/core/src/types/key/dkg_session_keys.rs @@ -1,12 +1,14 @@ //! Utilities around the DKG session keys use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt::Display; -use std::io::{Error, ErrorKind}; +use std::io::{Error, ErrorKind, Read}; use std::str::FromStr; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use serde::{Deserialize, Serialize}; @@ -51,8 +53,8 @@ impl BorshSerialize for DkgKeypair { } impl BorshDeserialize for DkgKeypair { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let kp_bytes: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let kp_bytes: Vec = BorshDeserialize::deserialize_reader(reader)?; let kp: ferveo_common::Keypair = CanonicalDeserialize::deserialize(kp_bytes.as_slice()) .map_err(|err| Error::new(ErrorKind::InvalidInput, err))?; @@ -111,8 +113,8 @@ impl BorshSerialize for DkgPublicKey { } impl BorshDeserialize for DkgPublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let pk_bytes: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let pk_bytes: Vec = BorshDeserialize::deserialize_reader(reader)?; let pk: ferveo_common::PublicKey = CanonicalDeserialize::deserialize(pk_bytes.as_slice()) .map_err(|err| Error::new(ErrorKind::InvalidInput, err))?; @@ -122,7 +124,7 @@ impl BorshDeserialize for DkgPublicKey { impl BorshSchema for DkgPublicKey { 
fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -140,9 +142,7 @@ impl BorshSchema for DkgPublicKey { impl Display for DkgPublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let vec = self - .try_to_vec() - .expect("Encoding public key shouldn't fail"); + let vec = self.serialize_to_vec(); write!(f, "{}", HEXLOWER.encode(&vec)) } } diff --git a/core/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs index faf6076ea2..5fdee71cf9 100644 --- a/core/src/types/key/ed25519.rs +++ b/core/src/types/key/ed25519.rs @@ -1,11 +1,14 @@ //! Ed25519 keys and related functionality +use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt::{Debug, Display}; use std::hash::{Hash, Hasher}; -use std::io::Write; +use std::io::{Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; @@ -38,7 +41,7 @@ impl super::PublicKey for PublicKey { _ => Err(ParsePublicKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else { Err(ParsePublicKeyError::MismatchedScheme) @@ -47,17 +50,17 @@ impl super::PublicKey for PublicKey { } impl BorshDeserialize for PublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(PublicKey( ed25519_consensus::VerificationKey::try_from( - <[u8; PUBLIC_KEY_LENGTH] as BorshDeserialize>::deserialize( - buf, + <[u8; PUBLIC_KEY_LENGTH] as BorshDeserialize>::deserialize_reader( + reader, )? 
- .as_ref(), + .as_ref(), ) - .map_err(|e| { - std::io::Error::new(std::io::ErrorKind::InvalidInput, e) - })?, + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::InvalidInput, e) + })?, )) } } @@ -70,7 +73,7 @@ impl BorshSerialize for PublicKey { impl BorshSchema for PublicKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -142,7 +145,7 @@ impl super::SecretKey for SecretKey { _ => Err(ParseSecretKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSecretKeyError::InvalidEncoding) } else { Err(ParseSecretKeyError::MismatchedScheme) @@ -165,17 +168,17 @@ impl Clone for SecretKey { } impl BorshDeserialize for SecretKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(SecretKey(Box::new( ed25519_consensus::SigningKey::try_from( - <[u8; SECRET_KEY_LENGTH] as BorshDeserialize>::deserialize( - buf, + <[u8; SECRET_KEY_LENGTH] as BorshDeserialize>::deserialize_reader( + reader, )? 
- .as_ref(), + .as_ref(), ) - .map_err(|e| { - std::io::Error::new(std::io::ErrorKind::InvalidInput, e) - })?, + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::InvalidInput, e) + })?, ))) } } @@ -188,7 +191,7 @@ impl BorshSerialize for SecretKey { impl BorshSchema for SecretKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -245,7 +248,7 @@ impl super::Signature for Signature { _ => Err(ParseSignatureError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else { Err(ParseSignatureError::MismatchedScheme) @@ -254,10 +257,10 @@ impl super::Signature for Signature { } impl BorshDeserialize for Signature { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(Signature( ed25519_consensus::Signature::try_from( - <[u8; SIGNATURE_LENGTH] as BorshDeserialize>::deserialize(buf)? + <[u8; SIGNATURE_LENGTH] as BorshDeserialize>::deserialize_reader(reader)? 
.as_ref(), ) .map_err(|e| { @@ -275,7 +278,7 @@ impl BorshSerialize for Signature { impl BorshSchema for Signature { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -305,6 +308,12 @@ impl PartialOrd for Signature { } } +impl Ord for Signature { + fn cmp(&self, other: &Self) -> Ordering { + self.0.to_bytes().cmp(&other.0.to_bytes()) + } +} + /// An implementation of the Ed25519 signature scheme #[derive( Debug, diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs index 1287956b13..9a497d4f64 100644 --- a/core/src/types/key/mod.rs +++ b/core/src/types/key/mod.rs @@ -11,6 +11,7 @@ use std::hash::Hash; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use lazy_map::LazyMap; use namada_macros::StorageKeys; @@ -211,7 +212,7 @@ pub trait Signature: sig: &SIG, ) -> Result { if SIG::TYPE == Self::TYPE { - let sig_arr = sig.try_to_vec().unwrap(); + let sig_arr = sig.serialize_to_vec(); let res = Self::try_from_slice(sig_arr.as_ref()); res.map_err(ParseSignatureError::InvalidEncoding) } else { @@ -247,7 +248,7 @@ pub trait PublicKey: pk: &PK, ) -> Result { if Self::TYPE == PK::TYPE { - let pk_arr = pk.try_to_vec().unwrap(); + let pk_arr = pk.serialize_to_vec(); let res = Self::try_from_slice(pk_arr.as_ref()); res.map_err(ParsePublicKeyError::InvalidEncoding) } else { @@ -283,7 +284,7 @@ pub trait SecretKey: sk: &SK, ) -> Result { if SK::TYPE == Self::TYPE { - let sk_vec = sk.try_to_vec().unwrap(); + let sk_vec = sk.serialize_to_vec(); let res = Self::try_from_slice(sk_vec.as_ref()); res.map_err(ParseSecretKeyError::InvalidEncoding) } else { @@ -440,8 +441,7 @@ pub enum PkhFromStringError { impl From<&PK> for PublicKeyHash { fn from(pk: &PK) -> Self { - let pk_bytes = - pk.try_to_vec().expect("Public key encoding shouldn't fail"); + let pk_bytes = 
pk.serialize_to_vec(); let full_hash = Sha256::digest(&pk_bytes); // take first 20 bytes of the hash let mut hash: [u8; PKH_LEN] = Default::default(); @@ -630,10 +630,7 @@ macro_rules! sigscheme_test { let mut rng: ThreadRng = thread_rng(); let keypair = <$type>::generate(&mut rng); - println!( - "keypair {:?}", - keypair.try_to_vec().unwrap().as_slice() - ); + println!("keypair {:?}", keypair.serialize_to_vec().as_slice()); } /// Run `cargo test gen_keypair -- --nocapture` to generate a /// new keypair. diff --git a/core/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs index 6fde8af5cd..35acb38626 100644 --- a/core/src/types/key/secp256k1.rs +++ b/core/src/types/key/secp256k1.rs @@ -1,13 +1,15 @@ //! secp256k1 keys and related functionality use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt; use std::fmt::{Debug, Display}; use std::hash::{Hash, Hasher}; -use std::io::{ErrorKind, Write}; +use std::io::{ErrorKind, Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use ethabi::ethereum_types::U256; use ethabi::Token; @@ -48,7 +50,7 @@ impl super::PublicKey for PublicKey { _ => Err(ParsePublicKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else { Err(ParsePublicKeyError::MismatchedScheme) @@ -57,10 +59,15 @@ impl super::PublicKey for PublicKey { } impl BorshDeserialize for PublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + use libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE; // deserialize the bytes first + + let buf: &mut [u8; COMPRESSED_PUBLIC_KEY_SIZE] = + &mut [0u8; COMPRESSED_PUBLIC_KEY_SIZE]; + reader.read_exact(buf)?; let pk = 
libsecp256k1::PublicKey::parse_compressed( - buf.get(0..libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE) + buf.get(0..COMPRESSED_PUBLIC_KEY_SIZE) .ok_or_else(|| std::io::Error::from(ErrorKind::UnexpectedEof))? .try_into() .unwrap(), @@ -71,7 +78,6 @@ impl BorshDeserialize for PublicKey { format!("Error decoding secp256k1 public key: {}", e), ) })?; - *buf = &buf[libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE..]; Ok(PublicKey(pk)) } } @@ -85,7 +91,7 @@ impl BorshSerialize for PublicKey { impl BorshSchema for PublicKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -184,7 +190,7 @@ impl super::SecretKey for SecretKey { _ => Err(ParseSecretKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSecretKeyError::InvalidEncoding) } else { Err(ParseSecretKeyError::MismatchedScheme) @@ -216,11 +222,11 @@ impl<'de> Deserialize<'de> for SecretKey { } impl BorshDeserialize for SecretKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { // deserialize the bytes first Ok(SecretKey(Box::new( libsecp256k1::SecretKey::parse( - &(BorshDeserialize::deserialize(buf)?), + &(BorshDeserialize::deserialize_reader(reader)?), ) .map_err(|e| { std::io::Error::new( @@ -240,7 +246,7 @@ impl BorshSerialize for SecretKey { impl BorshSchema for SecretKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -297,7 +303,7 @@ impl super::Signature for Signature { _ => Err(ParseSignatureError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + 
Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else { Err(ParseSignatureError::MismatchedScheme) @@ -371,9 +377,10 @@ impl<'de> Deserialize<'de> for Signature { } impl BorshDeserialize for Signature { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { // deserialize the bytes first - let (sig_bytes, recovery_id) = BorshDeserialize::deserialize(buf)?; + let (sig_bytes, recovery_id) = + BorshDeserialize::deserialize_reader(reader)?; Ok(Signature( libsecp256k1::Signature::parse_standard(&sig_bytes).map_err( @@ -405,7 +412,7 @@ impl BorshSerialize for Signature { impl BorshSchema for Signature { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -669,7 +676,7 @@ mod test { let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); signature.1 = RecoveryId::parse(3).expect("Test failed"); - let sig_bytes = signature.try_to_vec().expect("Test failed"); + let sig_bytes = signature.serialize_to_vec(); let sig = Signature::try_from_slice(sig_bytes.as_slice()) .expect("Test failed"); assert_eq!(sig, signature); diff --git a/core/src/types/masp.rs b/core/src/types/masp.rs index e5ad0202b1..9083852a81 100644 --- a/core/src/types/masp.rs +++ b/core/src/types/masp.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use bech32::{FromBase32, ToBase32}; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use sha2::{Digest, Sha256}; use crate::types::address::{ @@ -147,9 +148,7 @@ impl PaymentAddress { /// Hash this payment address pub fn hash(&self) -> String { - let bytes = (self.0, self.1) - .try_to_vec() - .expect("Payment address encoding shouldn't fail"); + let bytes = (self.0, self.1).serialize_to_vec(); let mut hasher = Sha256::new(); hasher.update(bytes); // hex of the first 40 chars of the 
hash diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index ad0c14f499..b4c6a595dd 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -2,13 +2,14 @@ use std::collections::VecDeque; use std::convert::{TryFrom, TryInto}; use std::fmt::Display; -use std::io::Write; +use std::io::{Read, Write}; use std::num::ParseIntError; use std::ops::{Add, AddAssign, Deref, Div, Drop, Mul, Rem, Sub}; use std::str::FromStr; use arse_merkle_tree::InternalKey; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::{BASE32HEX_NOPAD, HEXUPPER}; use ics23::CommitmentProof; use index_set::vec::VecIndexSet; @@ -320,7 +321,7 @@ pub struct Header { impl Header { /// The number of bytes when this header is encoded pub fn encoded_len(&self) -> usize { - self.try_to_vec().unwrap().len() + self.serialize_to_vec().len() } } @@ -404,13 +405,13 @@ impl BorshSerialize for StringKey { } impl BorshDeserialize for StringKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { use std::io::ErrorKind; let (original, tree_key, length): ( Vec, InternalKey, usize, - ) = BorshDeserialize::deserialize(buf)?; + ) = BorshDeserialize::deserialize_reader(reader)?; let original: [u8; IBC_KEY_LIMIT] = original.try_into().map_err(|_| { std::io::Error::new( diff --git a/core/src/types/time.rs b/core/src/types/time.rs index 0670392f94..341a81411c 100644 --- a/core/src/types/time.rs +++ b/core/src/types/time.rs @@ -1,7 +1,9 @@ //! Types for dealing with time and durations. 
+use std::collections::BTreeMap; use std::convert::{TryFrom, TryInto}; use std::fmt::Display; +use std::io::Read; use std::ops::{Add, Sub}; use std::str::FromStr; @@ -183,9 +185,9 @@ impl BorshSerialize for DateTimeUtc { } impl BorshDeserialize for DateTimeUtc { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { use std::io::{Error, ErrorKind}; - let raw: String = BorshDeserialize::deserialize(buf)?; + let raw: String = BorshDeserialize::deserialize_reader(reader)?; let actual = DateTime::parse_from_rfc3339(&raw) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; Ok(Self(actual.into())) @@ -194,7 +196,7 @@ impl BorshDeserialize for DateTimeUtc { impl BorshSchema for DateTimeUtc { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 0ee60b4326..289602ae23 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -749,6 +749,7 @@ impl From for Uint { )] #[repr(u8)] #[allow(missing_docs)] +#[borsh(use_discriminant = true)] pub enum MaspDenom { Zero = 0, One, diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index bbebc85e77..3a7ba2f335 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -7,6 +7,7 @@ pub mod decrypted_tx { #[cfg(feature = "ferveo-tpke")] use ark_ec::PairingEngine; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use sha2::{Digest, Sha256}; #[derive( @@ -32,9 +33,7 @@ pub mod decrypted_tx { impl DecryptedTx { /// Produce a SHA-256 hash of this header pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize decrypted tx"), - ); + hasher.update(self.serialize_to_vec()); hasher } } 
diff --git a/core/src/types/transaction/encrypted.rs b/core/src/types/transaction/encrypted.rs index 277ec6d3fd..8b547fea88 100644 --- a/core/src/types/transaction/encrypted.rs +++ b/core/src/types/transaction/encrypted.rs @@ -3,7 +3,7 @@ /// *Not wasm compatible* #[cfg(feature = "ferveo-tpke")] pub mod encrypted_tx { - use std::io::{Error, ErrorKind, Write}; + use std::io::{Error, ErrorKind, Read, Write}; use ark_ec::PairingEngine; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -32,8 +32,10 @@ pub mod encrypted_tx { } impl borsh::de::BorshDeserialize for EncryptionKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let key: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + let key: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(EncryptionKey( CanonicalDeserialize::deserialize(&*key) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?, diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 8acb9e6c7e..895f1c43fd 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -22,6 +22,7 @@ use std::collections::BTreeSet; use std::fmt; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use decrypted::*; #[cfg(feature = "ferveo-tpke")] pub use encrypted::EncryptionKey; @@ -160,7 +161,7 @@ pub enum TxType { impl TxType { /// Produce a SHA-256 hash of this header pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.try_to_vec().expect("unable to serialize header")); + hasher.update(self.serialize_to_vec()); hasher } } diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index 1a51434b29..78feafb3bd 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -24,10 +24,12 @@ pub struct UpdateDkgSessionKey { #[cfg(feature = "ferveo-tpke")] mod 
protocol_txs { - use std::io::{ErrorKind, Write}; + use std::collections::BTreeMap; + use std::io::{ErrorKind, Read, Write}; use std::path::Path; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use ferveo::dkg::pv::Message; use serde_json; @@ -77,9 +79,7 @@ mod protocol_txs { /// Produce a SHA-256 hash of this section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize protocol"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -213,7 +213,7 @@ mod protocol_txs { ( $( $type:ident ),* $(,)?) => { match self { $( EthereumTxData::$type(x) => - x.try_to_vec().map(|data| (data, ProtocolTxType::$type))),* + (x.serialize_to_vec(), ProtocolTxType::$type)),* } } } @@ -225,7 +225,6 @@ mod protocol_txs { BridgePoolVext, ValSetUpdateVext, } - .expect("Should be able to borsh-serialize tx data") } /// Deserialize Ethereum protocol transaction data. @@ -330,10 +329,7 @@ mod protocol_txs { }))); outer_tx.header.chain_id = chain_id; outer_tx.set_code(Code::new(code)); - outer_tx.set_data(Data::new( - data.try_to_vec() - .expect("Serializing request should not fail"), - )); + outer_tx.set_data(Data::new(data.serialize_to_vec())); outer_tx.add_section(Section::Signature(Signature::new( vec![ outer_tx.header_hash(), @@ -375,8 +371,10 @@ mod protocol_txs { } impl BorshDeserialize for DkgMessage { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let blob: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + let blob: Vec = BorshDeserialize::deserialize_reader(reader)?; let json = String::from_utf8(blob).map_err(|err| { std::io::Error::new(ErrorKind::InvalidData, err) })?; @@ -389,7 +387,7 @@ mod protocol_txs { impl BorshSchema for DkgMessage { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, 
borsh::schema::Definition, >, diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index e9b49b0c07..1064833925 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -10,6 +10,7 @@ pub mod wrapper_tx { #[cfg(feature = "ferveo-tpke")] pub use ark_ec::{AffineCurve, PairingEngine}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -225,9 +226,7 @@ pub mod wrapper_tx { /// Produce a SHA-256 hash of this section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize wrapper"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -305,12 +304,7 @@ pub mod wrapper_tx { key: None, shielded: Some(masp_hash), }; - let data = transfer.try_to_vec().map_err(|_| { - WrapperTxErr::InvalidUnshield( - "Error while serializing the unshield transfer data" - .to_string(), - ) - })?; + let data = transfer.serialize_to_vec(); tx.set_data(Data::new(data)); tx.set_code(Code::from_hash(transfer_code_hash)); @@ -349,7 +343,7 @@ pub mod wrapper_tx { ); // Test borsh roundtrip - let borsh = limit.try_to_vec().expect("Test failed"); + let borsh = limit.serialize_to_vec(); assert_eq!( limit, BorshDeserialize::deserialize(&mut borsh.as_ref()) diff --git a/core/src/types/vote_extensions/bridge_pool_roots.rs b/core/src/types/vote_extensions/bridge_pool_roots.rs index 5670d3967c..22718521f3 100644 --- a/core/src/types/vote_extensions/bridge_pool_roots.rs +++ b/core/src/types/vote_extensions/bridge_pool_roots.rs @@ -21,6 +21,7 @@ use crate::types::storage::BlockHeight; Clone, PartialEq, PartialOrd, + Ord, Eq, Hash, BorshSerialize, diff --git a/core/src/types/voting_power.rs b/core/src/types/voting_power.rs index a28eedc1a4..292fadb6f9 100644 --- a/core/src/types/voting_power.rs 
+++ b/core/src/types/voting_power.rs @@ -1,6 +1,8 @@ //! This module contains types related with validator voting power calculations. +use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; +use std::io::Read; use std::iter::Sum; use std::ops::{Add, AddAssign, Mul}; @@ -247,26 +249,30 @@ impl BorshSerialize for FractionalVotingPower { } impl BorshDeserialize for FractionalVotingPower { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let (numer, denom): (Uint, Uint) = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let (numer, denom): (Uint, Uint) = + BorshDeserialize::deserialize_reader(reader)?; Ok(FractionalVotingPower(Ratio::::new(numer, denom))) } } impl BorshSchema for FractionalVotingPower { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, ) { - let fields = - borsh::schema::Fields::UnnamedFields(borsh::maybestd::vec![ - Uint::declaration(), - Uint::declaration() - ]); + let fields = borsh::schema::Fields::UnnamedFields(vec![ + Uint::declaration(), + Uint::declaration(), + ]); let definition = borsh::schema::Definition::Struct { fields }; - Self::add_definition(Self::declaration(), definition, definitions); + borsh::schema::add_definition( + Self::declaration(), + definition, + definitions, + ); } fn declaration() -> borsh::schema::Declaration { diff --git a/encoding_spec/src/main.rs b/encoding_spec/src/main.rs index 5889b03b8d..4365fd4a01 100644 --- a/encoding_spec/src/main.rs +++ b/encoding_spec/src/main.rs @@ -15,10 +15,12 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -use std::collections::HashSet; +use std::collections::{BTreeMap, HashSet}; use std::io::Write; +use std::iter::Extend; -use borsh::{schema, BorshSchema}; +use borsh::schema::{BorshSchemaContainer, Declaration, Definition}; +use borsh::{schema, 
schema_container_of}; use itertools::Itertools; use lazy_static::lazy_static; use madato::types::TableRow; @@ -56,179 +58,188 @@ lazy_static! { ]); } +fn btree(b: &BorshSchemaContainer) -> BTreeMap { + b.definitions() + .map(|(x, y)| (x.clone(), y.clone())) + .collect() +} + fn main() -> Result<(), Box> { let mut file = std::fs::File::create(OUTPUT_PATH).unwrap(); write_generated_code_notice(&mut file)?; // Top-level definitions are displayed at the top - let address_schema = Address::schema_container(); - let token_amount_schema = token::Amount::schema_container(); - let epoch_schema = Epoch::schema_container(); - let parameters_schema = Parameters::schema_container(); + + let address_schema = schema_container_of::
(); + let token_amount_schema = schema_container_of::(); + let epoch_schema = schema_container_of::(); + let parameters_schema = schema_container_of::(); // TODO update after - let public_key_schema = PublicKey::schema_container(); + let public_key_schema = schema_container_of::(); // TODO update after - let signature_schema = Signature::schema_container(); + let signature_schema = schema_container_of::(); let init_account_schema = - transaction::account::InitAccount::schema_container(); + schema_container_of::(); let init_validator_schema = - transaction::pos::InitValidator::schema_container(); - let token_transfer_schema = token::Transfer::schema_container(); + schema_container_of::(); + let token_transfer_schema = schema_container_of::(); let update_account = - transaction::account::UpdateAccount::schema_container(); - let pos_bond_schema = pos::Bond::schema_container(); - let pos_withdraw_schema = pos::Withdraw::schema_container(); - let wrapper_tx_schema = transaction::WrapperTx::schema_container(); + schema_container_of::(); + let pos_bond_schema = schema_container_of::(); + let pos_withdraw_schema = schema_container_of::(); + let wrapper_tx_schema = schema_container_of::(); // TODO derive BorshSchema after - // let tx_result_schema = transaction::TxResult::schema_container(); - let tx_type_schema = transaction::TxType::schema_container(); - let prefix_value_schema = storage::PrefixValue::schema_container(); + // let tx_result_schema = schema_container_of::(); + let tx_type_schema = schema_container_of::(); + let prefix_value_schema = schema_container_of::(); // PoS // TODO add after // TODO imported from `use namada::ledger::pos::Bonds;` - // let pos_bonds_schema = Bonds::schema_container(); + // let pos_bonds_schema = schema_container_of::(); // Merge type definitions - let mut definitions = address_schema.definitions; + + let mut definitions = btree(&address_schema); + // TODO check for conflicts (same name, different declaration) - 
definitions.extend(token_amount_schema.definitions); - definitions.extend(epoch_schema.definitions); - definitions.extend(parameters_schema.definitions); - definitions.extend(public_key_schema.definitions); - definitions.extend(signature_schema.definitions); - definitions.extend(init_account_schema.definitions); - definitions.extend(init_validator_schema.definitions); - definitions.extend(token_transfer_schema.definitions); - definitions.extend(update_account.definitions); - definitions.extend(pos_bond_schema.definitions); - definitions.extend(pos_withdraw_schema.definitions); - definitions.extend(wrapper_tx_schema.definitions); - // definitions.extend(tx_result_schema.definitions); - definitions.extend(tx_type_schema.definitions); - definitions.extend(prefix_value_schema.definitions); - // definitions.extend(pos_bonds_schema.definitions); + definitions.extend(btree(&token_amount_schema)); + definitions.extend(btree(&epoch_schema)); + definitions.extend(btree(¶meters_schema)); + definitions.extend(btree(&public_key_schema)); + definitions.extend(btree(&signature_schema)); + definitions.extend(btree(&init_account_schema)); + definitions.extend(btree(&init_validator_schema)); + definitions.extend(btree(&token_transfer_schema)); + definitions.extend(btree(&update_account)); + definitions.extend(btree(&pos_bond_schema)); + definitions.extend(btree(&pos_withdraw_schema)); + definitions.extend(btree(&wrapper_tx_schema)); + // definitions.extend(btree(&tx_result_schema)); + definitions.extend(btree(&tx_type_schema)); + definitions.extend(btree(&prefix_value_schema)); + // definitions.extend(btree(&pos_bonds_schema)); let mut tables: Vec = Vec::with_capacity(definitions.len()); // Add the top-level definitions first let address_definition = - definitions.remove(&address_schema.declaration).unwrap(); + definitions.remove(address_schema.declaration()).unwrap(); let address_table = - definition_to_table(address_schema.declaration, 
address_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/address/enum.Address.html"); + definition_to_table( address_schema.declaration(), address_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/address/enum.Address.html"); tables.push(address_table); let token_amount_definition = definitions - .remove(&token_amount_schema.declaration) + .remove(token_amount_schema.declaration()) .unwrap(); let token_amount_table = definition_to_table( - token_amount_schema.declaration, + token_amount_schema.declaration(), token_amount_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/token/struct.Amount.html"); tables.push(token_amount_table); let epoch_definition = - definitions.remove(&epoch_schema.declaration).unwrap(); + definitions.remove(epoch_schema.declaration()).unwrap(); let epoch_table = - definition_to_table(epoch_schema.declaration, epoch_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/storage/struct.Epoch.html"); + definition_to_table(epoch_schema.declaration(), epoch_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/storage/struct.Epoch.html"); tables.push(epoch_table); let parameters_definition = - definitions.remove(¶meters_schema.declaration).unwrap(); + definitions.remove(parameters_schema.declaration()).unwrap(); let parameters_table = - definition_to_table(parameters_schema.declaration, parameters_definition).with_rust_doc_link("file:///Users/tz/dev/namada/target/doc/namada/ledger/parameters/struct.Parameters.html"); + definition_to_table(parameters_schema.declaration(), parameters_definition).with_rust_doc_link("file:///Users/tz/dev/namada/target/doc/namada/ledger/parameters/struct.Parameters.html"); tables.push(parameters_table); let public_key_definition = - definitions.remove(&public_key_schema.declaration).unwrap(); + definitions.remove(public_key_schema.declaration()).unwrap(); let 
public_key_table = - definition_to_table(public_key_schema.declaration, public_key_definition).with_rust_doc_link( + definition_to_table(public_key_schema.declaration(), public_key_definition).with_rust_doc_link( // TODO update after "https://dev.namada.net/master/rustdoc/namada/types/key/ed25519/struct.PublicKey.html"); tables.push(public_key_table); let signature_definition = - definitions.remove(&signature_schema.declaration).unwrap(); + definitions.remove(signature_schema.declaration()).unwrap(); let signature_table = - definition_to_table(signature_schema.declaration, signature_definition).with_rust_doc_link( + definition_to_table(signature_schema.declaration(), signature_definition).with_rust_doc_link( // TODO update after "https://dev.namada.net/master/rustdoc/namada/types/key/ed25519/struct.Signature.html"); tables.push(signature_table); let init_account_definition = definitions - .remove(&init_account_schema.declaration) + .remove(init_account_schema.declaration()) .unwrap(); let init_account_table = definition_to_table( - init_account_schema.declaration, + init_account_schema.declaration(), init_account_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.InitAccount.html"); tables.push(init_account_table); let init_validator_definition = definitions - .remove(&init_validator_schema.declaration) + .remove(init_validator_schema.declaration()) .unwrap(); let init_validator_table = definition_to_table( - init_validator_schema.declaration, + init_validator_schema.declaration(), init_validator_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.InitValidator.html"); tables.push(init_validator_table); let token_transfer_definition = definitions - .remove(&token_transfer_schema.declaration) + .remove(token_transfer_schema.declaration()) .unwrap(); let token_transfer_table = definition_to_table( - token_transfer_schema.declaration, + 
token_transfer_schema.declaration(), token_transfer_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/token/struct.Transfer.html"); tables.push(token_transfer_table); let update_account_definition = - definitions.remove(&update_account.declaration).unwrap(); + definitions.remove(update_account.declaration()).unwrap(); let update_accoun_table = - definition_to_table(update_account.declaration, update_account_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.UpdateVp.html"); + definition_to_table(update_account.declaration(), update_account_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.UpdateVp.html"); tables.push(update_accoun_table); let pos_bond_definition = - definitions.remove(&pos_bond_schema.declaration).unwrap(); + definitions.remove(pos_bond_schema.declaration()).unwrap(); let pos_bond_table = - definition_to_table(pos_bond_schema.declaration, pos_bond_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Bond.html"); + definition_to_table(pos_bond_schema.declaration(), pos_bond_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Bond.html"); tables.push(pos_bond_table); let pos_withdraw_definition = definitions - .remove(&pos_withdraw_schema.declaration) + .remove(pos_withdraw_schema.declaration()) .unwrap(); let pos_withdraw_table = definition_to_table( - pos_withdraw_schema.declaration, + pos_withdraw_schema.declaration(), pos_withdraw_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Withdraw.html"); tables.push(pos_withdraw_table); let wrapper_tx_definition = - definitions.remove(&wrapper_tx_schema.declaration).unwrap(); + definitions.remove(wrapper_tx_schema.declaration()).unwrap(); let wrapper_tx_table = definition_to_table( - 
wrapper_tx_schema.declaration, + wrapper_tx_schema.declaration(), wrapper_tx_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/wrapper/wrapper_tx/struct.WrapperTx.html"); tables.push(wrapper_tx_table); // let tx_result_definition = - // definitions.remove(&tx_result_schema.declaration).unwrap(); + // definitions.remove(tx_result_schema.declaration()).unwrap(); // let tx_result_table = - // definition_to_table(tx_result_schema.declaration, + // definition_to_table(tx_result_schema.declaration(), // tx_result_definition).with_rust_doc_link("TODO"); // tables.push(tx_result_table); let tx_type_definition = - definitions.remove(&tx_type_schema.declaration).unwrap(); + definitions.remove(tx_type_schema.declaration()).unwrap(); let tx_type_table = - definition_to_table(tx_type_schema.declaration, tx_type_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/tx_types/enum.TxType.html"); + definition_to_table(tx_type_schema.declaration(), tx_type_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/tx_types/enum.TxType.html"); tables.push(tx_type_table); let prefix_value_definition = definitions - .remove(&prefix_value_schema.declaration) + .remove(prefix_value_schema.declaration()) .unwrap(); let prefix_value_table = - definition_to_table(prefix_value_schema.declaration, prefix_value_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/prefix_values/enum.TxType.html"); + definition_to_table(prefix_value_schema.declaration(), prefix_value_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/prefix_values/enum.TxType.html"); tables.push(prefix_value_table); // Add PoS definitions @@ -243,7 +254,7 @@ fn main() -> Result<(), Box> { .into_iter() .sorted_by_key(|(key, _val)| key.clone()) { - tables.push(definition_to_table(declaration, defition)) + 
tables.push(definition_to_table(&declaration, defition)) } // Print the tables to markdown @@ -271,7 +282,7 @@ struct Table { rows: Option>, } -fn definition_to_table(name: String, def: schema::Definition) -> Table { +fn definition_to_table(name: &Declaration, def: schema::Definition) -> Table { let (desc, rows) = match def { schema::Definition::Array { length, elements } => { let rows = None; @@ -345,7 +356,11 @@ fn definition_to_table(name: String, def: schema::Definition) -> Table { } } }; - Table { name, desc, rows } + Table { + name: name.to_string(), + desc, + rows, + } } /// Format a type to markdown. For internal types, adds anchors. diff --git a/ethereum_bridge/src/parameters.rs b/ethereum_bridge/src/parameters.rs index c86ef1e6ed..599ef0c5f8 100644 --- a/ethereum_bridge/src/parameters.rs +++ b/ethereum_bridge/src/parameters.rs @@ -367,6 +367,7 @@ where #[cfg(test)] mod tests { + use borsh_ext::BorshSerializeExt; use eyre::Result; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::types::ethereum_events::EthAddress; @@ -474,7 +475,7 @@ mod tests { wl_storage .write_bytes( &bridge_storage::min_confirmations_key(), - MinimumConfirmations::default().try_to_vec().unwrap(), + MinimumConfirmations::default().serialize_to_vec(), ) .unwrap(); diff --git a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 3271efeed5..9cb4377cef 100644 --- a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -18,7 +18,6 @@ use crate::protocol::transactions::{utils, votes, ChangedKeys}; use crate::storage::eth_bridge_queries::EthBridgeQueries; use crate::storage::proof::BridgePoolRootProof; use crate::storage::vote_tallies::{self, BridgePoolRoot}; - /// Applies a tally of signatures on over the Ethereum /// bridge pool root and nonce. 
Note that every signature /// passed into this function will be for the same @@ -222,7 +221,8 @@ mod test_apply_bp_roots_to_storage { use std::collections::BTreeSet; use assert_matches::assert_matches; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::{ get_key_from_hash, get_nonce_key, }; @@ -281,7 +281,7 @@ mod test_apply_bp_roots_to_storage { &KeccakHash([1; 32]), 100.into(), ); - let value = BlockHeight(101).try_to_vec().expect("Test failed"); + let value = BlockHeight(101).serialize_to_vec(); wl_storage .storage .block @@ -289,10 +289,7 @@ mod test_apply_bp_roots_to_storage { .update(&get_key_from_hash(&KeccakHash([1; 32])), value) .expect("Test failed"); wl_storage - .write_bytes( - &get_nonce_key(), - Uint::from(42).try_to_vec().expect("Test failed"), - ) + .write_bytes(&get_nonce_key(), Uint::from(42).serialize_to_vec()) .expect("Test failed"); TestPackage { validators: [validator_a, validator_b, validator_c], diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index d878a56d11..fa2be67104 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -564,7 +564,7 @@ mod tests { use std::collections::HashMap; use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use eyre::Result; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_pending_key; use namada_core::ledger::parameters::{ @@ -702,7 +702,7 @@ mod tests { let key = get_pending_key(&transfer); wl_storage .storage - .write(&key, transfer.try_to_vec().expect("Test failed")) + .write(&key, transfer.serialize_to_vec()) .expect("Test failed"); pending_transfers.push(transfer); @@ -743,10 +743,7 @@ mod tests { let payer_key = 
balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); wl_storage - .write_bytes( - &payer_key, - payer_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&payer_key, payer_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); @@ -761,36 +758,24 @@ mod tests { let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes( - &sender_key, - sender_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&sender_key, sender_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes( - &escrow_key, - escrow_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes( - &sender_key, - sender_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&sender_key, sender_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes( - &escrow_key, - escrow_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) .expect("Test failed"); update::amount( wl_storage, @@ -1162,7 +1147,7 @@ mod tests { let key = get_pending_key(&transfer); wl_storage .storage - .write(&key, transfer.try_to_vec().expect("Test failed")) + .write(&key, transfer.serialize_to_vec()) .expect("Test failed"); wl_storage .storage diff --git a/ethereum_bridge/src/protocol/transactions/read.rs b/ethereum_bridge/src/protocol/transactions/read.rs index 550be149ca..257c045e33 100644 --- 
a/ethereum_bridge/src/protocol/transactions/read.rs +++ b/ethereum_bridge/src/protocol/transactions/read.rs @@ -55,7 +55,7 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::storage; @@ -81,7 +81,7 @@ mod tests { let amount = Amount::from(1_000_000); let mut fake_storage = TestWlStorage::default(); fake_storage - .write_bytes(&key, amount.try_to_vec().unwrap()) + .write_bytes(&key, amount.serialize_to_vec()) .unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); diff --git a/ethereum_bridge/src/protocol/transactions/update.rs b/ethereum_bridge/src/protocol/transactions/update.rs index 8316a72df4..69530c0315 100644 --- a/ethereum_bridge/src/protocol/transactions/update.rs +++ b/ethereum_bridge/src/protocol/transactions/update.rs @@ -18,7 +18,7 @@ where { let mut amount = super::read::amount_or_default(wl_storage, key)?; update(&mut amount); - wl_storage.write_bytes(key, amount.try_to_vec()?)?; + wl_storage.write_bytes(key, borsh::to_vec(&amount)?)?; Ok(amount) } @@ -35,13 +35,14 @@ where { let mut value = super::read::value(wl_storage, key)?; update(&mut value); - wl_storage.write_bytes(key, value.try_to_vec()?)?; + wl_storage.write_bytes(key, borsh::to_vec(&value)?)?; Ok(value) } #[cfg(test)] mod tests { - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use eyre::{eyre, Result}; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; @@ -52,9 +53,9 @@ mod tests { fn test_value() -> Result<()> { let key = storage::Key::parse("some arbitrary key") .expect("could not set up test"); - let value = 21; + let value = 21u64; let mut wl_storage = TestWlStorage::default(); - let serialized = 
value.try_to_vec().expect("could not set up test"); + let serialized = value.serialize_to_vec(); wl_storage .write_bytes(&key, serialized) .expect("could not set up test"); diff --git a/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 832797ae1b..fe43c37c80 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,4 +1,5 @@ use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use eyre::{Result, WrapErr}; use namada_core::hints; use namada_core::ledger::storage::{ @@ -23,16 +24,19 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write_bytes(&keys.body(), &body.try_to_vec()?)?; - wl_storage.write_bytes(&keys.seen(), &tally.seen.try_to_vec()?)?; - wl_storage.write_bytes(&keys.seen_by(), &tally.seen_by.try_to_vec()?)?; + wl_storage.write_bytes(&keys.body(), &body.serialize_to_vec())?; + wl_storage.write_bytes(&keys.seen(), &tally.seen.serialize_to_vec())?; wl_storage - .write_bytes(&keys.voting_power(), &tally.voting_power.try_to_vec()?)?; + .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec())?; + wl_storage.write_bytes( + &keys.voting_power(), + &tally.voting_power.serialize_to_vec(), + )?; if !already_present { // add the current epoch for the inserted event wl_storage.write_bytes( &keys.voting_started_epoch(), - &wl_storage.storage.get_current_epoch().0.try_to_vec()?, + &wl_storage.storage.get_current_epoch().0.serialize_to_vec(), )?; } Ok(()) @@ -205,28 +209,18 @@ mod tests { assert!(result.is_ok()); let body = wl_storage.read_bytes(&keys.body()).unwrap(); - assert_eq!(body, Some(event.try_to_vec().unwrap())); + assert_eq!(body, Some(event.serialize_to_vec())); let seen = wl_storage.read_bytes(&keys.seen()).unwrap(); - assert_eq!(seen, Some(tally.seen.try_to_vec().unwrap())); + assert_eq!(seen, Some(tally.seen.serialize_to_vec())); let 
seen_by = wl_storage.read_bytes(&keys.seen_by()).unwrap(); - assert_eq!(seen_by, Some(tally.seen_by.try_to_vec().unwrap())); + assert_eq!(seen_by, Some(tally.seen_by.serialize_to_vec())); let voting_power = wl_storage.read_bytes(&keys.voting_power()).unwrap(); - assert_eq!( - voting_power, - Some(tally.voting_power.try_to_vec().unwrap()) - ); + assert_eq!(voting_power, Some(tally.voting_power.serialize_to_vec())); let epoch = wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(); assert_eq!( epoch, - Some( - wl_storage - .storage - .get_current_epoch() - .0 - .try_to_vec() - .unwrap() - ) + Some(wl_storage.storage.get_current_epoch().0.serialize_to_vec()) ); } @@ -249,29 +243,24 @@ mod tests { seen: false, }; wl_storage - .write_bytes(&keys.body(), &event.try_to_vec().unwrap()) + .write_bytes(&keys.body(), &event.serialize_to_vec()) .unwrap(); wl_storage - .write_bytes(&keys.seen(), &tally.seen.try_to_vec().unwrap()) + .write_bytes(&keys.seen(), &tally.seen.serialize_to_vec()) .unwrap(); wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.try_to_vec().unwrap()) + .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec()) .unwrap(); wl_storage .write_bytes( &keys.voting_power(), - &tally.voting_power.try_to_vec().unwrap(), + &tally.voting_power.serialize_to_vec(), ) .unwrap(); wl_storage .write_bytes( &keys.voting_started_epoch(), - &wl_storage - .storage - .get_block_height() - .0 - .try_to_vec() - .unwrap(), + &wl_storage.storage.get_block_height().0.serialize_to_vec(), ) .unwrap(); diff --git a/ethereum_bridge/src/storage/vote_tallies.rs b/ethereum_bridge/src/storage/vote_tallies.rs index ec03c498d6..edb60114e8 100644 --- a/ethereum_bridge/src/storage/vote_tallies.rs +++ b/ethereum_bridge/src/storage/vote_tallies.rs @@ -1,6 +1,6 @@ //! 
Functionality for accessing keys to do with tallying votes -use std::io::Write; +use std::io::{Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; @@ -199,11 +199,11 @@ impl BorshSerialize for BridgePoolRoot { } impl BorshDeserialize for BridgePoolRoot { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - as BorshDeserialize>::deserialize( - buf, + fn deserialize_reader(reader: &mut R) -> std::io::Result { + as BorshDeserialize>::deserialize_reader( + reader, ) - .map(BridgePoolRoot) + .map(BridgePoolRoot) } } diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index 9c24e9edfa..4d2283ad8e 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use std::num::NonZeroU64; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::eth_bridge::storage::whitelist; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; @@ -241,7 +241,7 @@ pub fn commit_bridge_pool_root_at_height( root: &KeccakHash, height: BlockHeight, ) { - let value = height.try_to_vec().expect("Encoding failed"); + let value = height.serialize_to_vec(); storage .block .tree diff --git a/ethereum_bridge/src/vp.rs b/ethereum_bridge/src/vp.rs index 1c06de83a1..ed678ff03b 100644 --- a/ethereum_bridge/src/vp.rs +++ b/ethereum_bridge/src/vp.rs @@ -1,4 +1,4 @@ -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::token::{balance_key, Amount}; @@ -17,12 +17,7 @@ where &namada_core::ledger::eth_bridge::ADDRESS, ); wl_storage - .write_bytes( - &escrow_key, - Amount::default() - .try_to_vec() - .expect("Serializing an amount shouldn't fail."), - ) + .write_bytes(&escrow_key, 
Amount::default().serialize_to_vec()) .expect( "Initializing the escrow balance of the Ethereum Bridge VP \ shouldn't fail.", diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs index 190548570b..a629afce73 100644 --- a/proof_of_stake/src/pos_queries.rs +++ b/proof_of_stake/src/pos_queries.rs @@ -1,7 +1,7 @@ //! Storage API for querying data about Proof-of-stake related //! data. This includes validator and epoch related data. -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key; use namada_core::ledger::parameters::EpochDuration; use namada_core::ledger::storage::WlStorage; @@ -172,9 +172,8 @@ where pk: &key::common::PublicKey, epoch: Option, ) -> Result { - let pk_bytes = pk - .try_to_vec() - .expect("Serializing public key should not fail"); + let pk_bytes = + borsh::to_vec(pk).expect("Serializing public key should not fail"); let epoch = epoch .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); self.get_consensus_validators(Some(epoch)) diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index b9573cab97..ad1e1702a1 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -5,7 +5,7 @@ use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; @@ -270,7 +270,7 @@ where } } - let data = args.try_to_vec().unwrap(); + let data = args.serialize_to_vec(); let response = RPC .shell() .eth_bridge() diff --git a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 5efea15ac5..6d07f5a613 100644 --- 
a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -641,7 +641,8 @@ where mod test_bridge_pool_vp { use std::env::temp_dir; - use borsh::BorshSerialize; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_signed_root_key; use namada_core::ledger::gas::TxGasMeter; use namada_core::types::address; @@ -756,11 +757,11 @@ mod test_bridge_pool_vp { let mut writelog = WriteLog::default(); // setup the initial bridge pool storage writelog - .write(&get_signed_root_key(), Hash([0; 32]).try_to_vec().unwrap()) + .write(&get_signed_root_key(), Hash([0; 32]).serialize_to_vec()) .expect("Test failed"); let transfer = initial_pool(); writelog - .write(&get_pending_key(&transfer), transfer.try_to_vec().unwrap()) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .expect("Test failed"); // whitelist wnam let key = whitelist::Key { @@ -769,7 +770,7 @@ mod test_bridge_pool_vp { } .into(); writelog - .write(&key, true.try_to_vec().unwrap()) + .write(&key, true.serialize_to_vec()) .expect("Test failed"); let key = whitelist::Key { asset: wnam(), @@ -777,7 +778,7 @@ mod test_bridge_pool_vp { } .into(); writelog - .write(&key, Amount::max().try_to_vec().unwrap()) + .write(&key, Amount::max().serialize_to_vec()) .expect("Test failed"); // set up users with ERC20 and NUT balances update_balances( @@ -843,10 +844,7 @@ mod test_bridge_pool_vp { // write the changes to the log let account_key = balance_key(&nam(), &balance.owner); write_log - .write( - &account_key, - updated_balance.try_to_vec().expect("Test failed"), - ) + .write(&account_key, updated_balance.serialize_to_vec()) .expect("Test failed"); // changed keys @@ -889,10 +887,10 @@ mod test_bridge_pool_vp { // write the changes to the log write_log - .write(&account_key, new_gas_balance.try_to_vec().unwrap()) + .write(&account_key, 
new_gas_balance.serialize_to_vec()) .expect("Test failed"); write_log - .write(&token_key, new_token_balance.try_to_vec().unwrap()) + .write(&token_key, new_token_balance.serialize_to_vec()) .expect("Test failed"); // return the keys changed @@ -1059,7 +1057,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1080,7 +1078,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1101,7 +1099,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1122,7 +1120,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1144,7 +1142,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1165,7 +1163,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1186,7 +1184,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1207,7 +1205,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + 
transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1254,7 +1252,7 @@ mod test_bridge_pool_vp { payer: bertha_address(), }, }; - log.write(&get_pending_key(transfer), t.try_to_vec().unwrap()) + log.write(&get_pending_key(transfer), t.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, @@ -1286,7 +1284,7 @@ mod test_bridge_pool_vp { payer: bertha_address(), }, }; - log.write(&get_pending_key(&t), transfer.try_to_vec().unwrap()) + log.write(&get_pending_key(&t), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, @@ -1306,7 +1304,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([ @@ -1333,10 +1331,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1418,10 +1413,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1486,10 +1478,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1500,9 +1489,7 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 200) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); 
assert!(keys_changed.insert(account_key)); @@ -1511,9 +1498,7 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(bp_account_key)); @@ -1521,9 +1506,7 @@ mod test_bridge_pool_vp { .write_log .write( &eb_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(eb_account_key)); @@ -1580,10 +1563,7 @@ mod test_bridge_pool_vp { let keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1594,9 +1574,7 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 200) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); @@ -1604,17 +1582,12 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log - .write( - &eb_account_key, - Amount::from(10).try_to_vec().expect("Test failed"), - ) + .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); @@ -1649,10 +1622,7 @@ mod test_bridge_pool_vp { let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); wl_storage - .write_bytes( - &eb_account_key, - Amount::default().try_to_vec().expect("Test failed"), - ) + .write_bytes(&eb_account_key, 
Amount::default().serialize_to_vec()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = @@ -1660,9 +1630,7 @@ mod test_bridge_pool_vp { wl_storage .write_bytes( &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH).serialize_to_vec(), ) .expect("Test failed"); wl_storage.write_log.commit_tx(); @@ -1688,10 +1656,7 @@ mod test_bridge_pool_vp { let keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1702,18 +1667,14 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log .write( &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH - 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); @@ -1721,17 +1682,12 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log - .write( - &eb_account_key, - Amount::from(10).try_to_vec().expect("Test failed"), - ) + .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp @@ -1780,10 +1736,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) 
.unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1868,7 +1821,7 @@ mod test_bridge_pool_vp { transfer.transfer.asset = wnam(); log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1890,7 +1843,7 @@ mod test_bridge_pool_vp { transfer.transfer.asset = wnam(); log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) diff --git a/shared/src/ledger/native_vp/ethereum_bridge/nut.rs b/shared/src/ledger/native_vp/ethereum_bridge/nut.rs index 1f7f313521..6dbb79d788 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -121,7 +121,7 @@ mod test_nuts { use std::env::temp_dir; use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::address::testing::arb_non_internal_address; @@ -157,13 +157,13 @@ mod test_nuts { wl.write_log .write( &src_balance_key, - Amount::from(100_u64).try_to_vec().expect("Test failed"), + Amount::from(100_u64).serialize_to_vec(), ) .expect("Test failed"); wl.write_log .write( &dst_balance_key, - Amount::from(200_u64).try_to_vec().expect("Test failed"), + Amount::from(200_u64).serialize_to_vec(), ) .expect("Test failed"); diff --git a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs index 4d006229a1..9f5f6dd19c 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -164,7 +164,7 @@ mod tests { use std::default::Default; use std::env::temp_dir; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge; use 
namada_core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; @@ -220,8 +220,7 @@ mod tests { .write_bytes( &balance_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -365,8 +364,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -379,8 +377,7 @@ mod tests { Amount::from( BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, ) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -417,8 +414,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -429,8 +425,7 @@ mod tests { .write( &escrow_key, Amount::from(BRIDGE_POOL_ESCROW_INITIAL_BALANCE) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -468,8 +463,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -482,8 +476,7 @@ mod tests { Amount::from( BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, ) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index 5af2afebf7..519e1ae233 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -2,7 +2,7 @@ use std::collections::{BTreeSet, HashMap, HashSet}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::ibc::storage::is_ibc_key; use 
namada_core::ledger::ibc::{IbcCommonContext, IbcStorageContext}; use namada_core::ledger::storage::write_log::StorageModification; @@ -150,14 +150,8 @@ where .unwrap_or_default(); dest_bal.receive(&amount.amount); - self.write( - &src_key, - src_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &dest_key, - dest_bal.try_to_vec().expect("encoding shouldn't failed"), - ) + self.write(&src_key, src_bal.serialize_to_vec())?; + self.write(&dest_key, dest_bal.serialize_to_vec()) } fn mint_token( @@ -182,21 +176,13 @@ where .unwrap_or_default(); minted_bal.receive(&amount.amount); - self.write( - &target_key, - target_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &minted_key, - minted_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; + self.write(&target_key, target_bal.serialize_to_vec())?; + self.write(&minted_key, minted_bal.serialize_to_vec())?; let minter_key = token::minter_key(token); self.write( &minter_key, - Address::Internal(InternalAddress::Ibc) - .try_to_vec() - .expect("encoding shouldn't failed"), + Address::Internal(InternalAddress::Ibc).serialize_to_vec(), ) } @@ -222,14 +208,8 @@ where .unwrap_or_default(); minted_bal.spend(&amount.amount); - self.write( - &target_key, - target_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &minted_key, - minted_bal.try_to_vec().expect("encoding shouldn't failed"), - ) + self.write(&target_key, target_bal.serialize_to_vec())?; + self.write(&minted_key, minted_bal.serialize_to_vec()) } /// Get the current height of this chain diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 3b6521905b..f370e79823 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -291,7 +291,7 @@ mod tests { use std::convert::TryFrom; use std::str::FromStr; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::TxGasMeter; 
use prost::Message; use sha2::Digest; @@ -423,7 +423,7 @@ mod tests { }; wl_storage .write_log - .write(&epoch_duration_key, epoch_duration.try_to_vec().unwrap()) + .write(&epoch_duration_key, epoch_duration.serialize_to_vec()) .expect("write failed"); // max_expected_time_per_block let time = DurationSecs::from(Duration::new(60, 0)); @@ -1004,7 +1004,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log .write(&client_conn_key, bytes) @@ -1115,7 +1115,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log .write(&client_conn_key, bytes) @@ -1230,7 +1230,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_b); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log .write(&client_conn_key, bytes) @@ -1981,7 +1981,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); wl_storage.write_log.commit_tx(); wl_storage.commit_block().expect("commit failed"); @@ -2192,7 +2192,7 @@ mod tests { )); let trace_hash = calc_hash(coin.denom.to_string()); let denom_key = ibc_denom_key(&trace_hash); - let bytes = coin.denom.to_string().try_to_vec().unwrap(); + let bytes = coin.denom.to_string().serialize_to_vec(); wl_storage .write_log .write(&denom_key, bytes) @@ -2455,7 +2455,7 @@ mod tests { let amount = Amount::native_whole(100); 
wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment let transfer_msg = MsgTransfer { @@ -2606,7 +2606,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment let sender = established_address_1(); diff --git a/shared/src/ledger/native_vp/multitoken.rs b/shared/src/ledger/native_vp/multitoken.rs index 564024fd8f..d3782c9548 100644 --- a/shared/src/ledger/native_vp/multitoken.rs +++ b/shared/src/ledger/native_vp/multitoken.rs @@ -139,7 +139,7 @@ where mod tests { use std::collections::BTreeSet; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::TxGasMeter; use super::*; @@ -187,14 +187,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .storage - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); wl_storage .write_log - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); @@ -202,7 +202,7 @@ mod tests { let amount = Amount::native_whole(10); wl_storage .write_log - .write(&receiver_key, amount.try_to_vec().unwrap()) + .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); @@ -243,14 +243,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .storage - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); wl_storage .write_log - .write(&sender_key, amount.try_to_vec().unwrap()) + 
.write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); @@ -259,7 +259,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&receiver_key, amount.try_to_vec().unwrap()) + .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); @@ -303,14 +303,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -319,7 +319,7 @@ mod tests { let minter_key = minter_key(&token); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -363,14 +363,14 @@ mod tests { let amount = Amount::native_whole(1000); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&nam()); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -379,7 +379,7 @@ mod tests { let minter_key = minter_key(&nam()); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -425,14 +425,14 @@ mod tests { let amount = 
Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -478,14 +478,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -494,7 +494,7 @@ mod tests { let minter_key = minter_key(&token); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -535,7 +535,7 @@ mod tests { let minter = established_address_1(); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -580,7 +580,7 @@ mod tests { .unwrap(); wl_storage .write_log - .write(&key, 0.try_to_vec().unwrap()) + .write(&key, 0.serialize_to_vec()) .expect("write failed"); keys_changed.insert(key); diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index a23b026eea..6100f0cd56 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -2,7 +2,7 @@ use std::collections::BTreeSet; use std::panic; -use borsh::BorshSerialize; +use 
borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::TxGasMeter; @@ -510,12 +510,12 @@ where Some(new_dest_balance) => { wl_storage .write_log_mut() - .write(&src_key, new_src_balance.try_to_vec().unwrap()) + .write(&src_key, new_src_balance.serialize_to_vec()) .map_err(|e| Error::FeeError(e.to_string()))?; - match wl_storage.write_log_mut().write( - &dest_key, - new_dest_balance.try_to_vec().unwrap(), - ) { + match wl_storage + .write_log_mut() + .write(&dest_key, new_dest_balance.serialize_to_vec()) + { Ok(_) => Ok(()), Err(e) => Err(Error::FeeError(e.to_string())), } diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs index 799a34e5bd..4ed2ffa161 100644 --- a/shared/src/ledger/queries/router.rs +++ b/shared/src/ledger/queries/router.rs @@ -90,7 +90,7 @@ macro_rules! handle_match { // queries::Storage`, you're probably missing the marker `(sub _)` let data = $handle($ctx, $( $matched_args ),* )?; // Encode the returned data with borsh - let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; + let data = borsh::to_vec(&data).into_storage_result()?; return Ok($crate::ledger::queries::EncodedResponseQuery { data, info: Default::default(), @@ -834,13 +834,14 @@ macro_rules! 
router { /// ``` #[cfg(test)] mod test_rpc_handlers { - use borsh::BorshSerialize; + + use borsh_ext::BorshSerializeExt; use crate::ledger::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, }; use crate::ledger::storage::{DBIter, StorageHasher, DB}; - use crate::ledger::storage_api::{self, ResultExt}; + use crate::ledger::storage_api; use crate::types::storage::Epoch; use crate::types::token; @@ -949,7 +950,7 @@ mod test_rpc_handlers { D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = "c".to_owned().try_to_vec().into_storage_result()?; + let data = "c".to_owned().serialize_to_vec(); Ok(ResponseQuery { data, ..ResponseQuery::default() diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index a766846916..4965028672 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -1,6 +1,7 @@ pub(super) mod eth_bridge; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; @@ -188,7 +189,7 @@ where data.gas_used = cumulated_gas; // NOTE: the keys changed by the wrapper transaction (if any) are not // returned from this function - let data = data.try_to_vec().into_storage_result()?; + let data = data.serialize_to_vec(); Ok(EncodedResponseQuery { data, proof: None, @@ -415,7 +416,7 @@ where } else { None }; - let data = data.try_to_vec().into_storage_result()?; + let data = data.serialize_to_vec(); Ok(EncodedResponseQuery { data, proof, @@ -561,7 +562,8 @@ where #[cfg(test)] mod test { - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada_test_utils::TestWasms; use crate::ledger::queries::testing::TestClient; @@ -609,7 +611,7 @@ mod test { client .wl_storage .storage - .write(&len_key, 
(tx_no_op.len() as u64).try_to_vec().unwrap()) + .write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) .unwrap(); // Request last committed epoch diff --git a/shared/src/ledger/queries/shell/eth_bridge.rs b/shared/src/ledger/queries/shell/eth_bridge.rs index 0bbc0aa679..bfd3e939f2 100644 --- a/shared/src/ledger/queries/shell/eth_bridge.rs +++ b/shared/src/ledger/queries/shell/eth_bridge.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::storage::merkle_tree::StoreRef; use namada_core::ledger::storage::{DBIter, StorageHasher, StoreType, DB}; @@ -425,7 +426,7 @@ where )), appendices: with_appendix.then_some(appendices), }; - let data = rsp.try_to_vec().into_storage_result()?; + let data = rsp.serialize_to_vec(); Ok(EncodedResponseQuery { data, ..Default::default() @@ -671,7 +672,6 @@ mod test_ethbridge_router { use std::collections::BTreeMap; use assert_matches::assert_matches; - use borsh::BorshSerialize; use namada_core::ledger::eth_bridge::storage::bridge_pool::{ get_pending_key, get_signed_root_key, BridgePoolTree, }; @@ -915,7 +915,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -958,7 +958,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -977,7 +977,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1023,7 +1023,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - 
transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1044,7 +1044,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1053,9 +1053,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root.clone(), BlockHeight::from(0)) - .try_to_vec() - .unwrap(), + (signed_root.clone(), BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1074,8 +1072,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1141,7 +1138,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1166,7 +1163,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1175,7 +1172,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1199,8 +1196,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1237,7 +1233,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1258,7 +1254,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + 
transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1267,7 +1263,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1311,7 +1307,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1326,10 +1322,7 @@ mod test_ethbridge_router { let voting_power = FractionalVotingPower::HALF; client .wl_storage - .write_bytes( - ð_msg_key.body(), - eth_event.try_to_vec().expect("Test failed"), - ) + .write_bytes(ð_msg_key.body(), eth_event.serialize_to_vec()) .expect("Test failed"); client .wl_storage @@ -1339,8 +1332,7 @@ mod test_ethbridge_router { 0.into(), voting_power * dummy_validator_stake, )]) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); client @@ -1362,7 +1354,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1414,7 +1406,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1435,7 +1427,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1444,7 +1436,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1463,8 +1455,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), 
with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1490,8 +1481,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 739f941b9a..83a1eeec74 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -10,6 +10,7 @@ use std::path::PathBuf; // use async_std::io::prelude::WriteExt; // use async_std::io::{self}; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use itertools::Either; use masp_primitives::asset_type::AssetType; #[cfg(feature = "mainnet")] @@ -563,7 +564,7 @@ pub type TransactionDelta = HashMap; #[derive(BorshSerialize, BorshDeserialize, Debug)] pub struct ShieldedContext { /// Location where this shielded context is saved - #[borsh_skip] + #[borsh(skip)] pub utils: U, /// The last transaction index to be processed in this context pub last_txidx: u64, @@ -1590,12 +1591,8 @@ impl ShieldedContext { "source address should be transparent".to_string(), ) })? - .try_to_vec() - .map_err(|_| { - Error::from(EncodingError::Encode( - "source address".to_string(), - )) - })?; + .serialize_to_vec(); + let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( source_enc.as_ref(), )); @@ -1639,12 +1636,7 @@ impl ShieldedContext { "source address should be transparent".to_string(), ) })? 
- .try_to_vec() - .map_err(|_| { - Error::from(EncodingError::Encode( - "target address".to_string(), - )) - })?; + .serialize_to_vec(); let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( target_enc.as_ref(), )); @@ -1735,10 +1727,9 @@ impl ShieldedContext { let builder_clone = builder.clone().map_builder(WalletMap); #[cfg(feature = "testing")] - let builder_bytes = BorshSerialize::try_to_vec(&builder_clone) - .map_err(|e| { - Error::from(EncodingError::Conversion(e.to_string())) - })?; + let builder_bytes = borsh::to_vec(&builder_clone).map_err(|e| { + Error::from(EncodingError::Conversion(e.to_string())) + })?; let build_transfer = || -> Result> { @@ -1798,12 +1789,9 @@ impl ShieldedContext { // Build and return the constructed transaction let built = build_transfer()?; if let LoadOrSaveProofs::Save = load_or_save { - let built_bytes = BorshSerialize::try_to_vec(&built) - .map_err(|e| { - Error::from(EncodingError::Conversion( - e.to_string(), - )) - })?; + let built_bytes = borsh::to_vec(&built).map_err(|e| { + Error::from(EncodingError::Conversion(e.to_string())) + })?; tokio::fs::write(&saved_filepath, built_bytes) .await .map_err(|e| Error::Other(e.to_string()))?; @@ -1971,12 +1959,8 @@ pub fn make_asset_type( ) -> Result { // Typestamp the chosen token with the current epoch let token_bytes = match epoch { - None => (token, denom) - .try_to_vec() - .map_err(|e| Error::from(EncodingError::Encode(e.to_string())))?, - Some(epoch) => (token, denom, epoch.0) - .try_to_vec() - .map_err(|e| Error::from(EncodingError::Encode(e.to_string())))?, + None => (token, denom).serialize_to_vec(), + Some(epoch) => (token, denom, epoch.0).serialize_to_vec(), }; // Generate the unique asset identifier from the unique token address AssetType::new(token_bytes.as_ref()) diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..e5078a3949 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -2,7 +2,8 @@ use 
std::collections::{BTreeMap, HashMap}; use std::path::PathBuf; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use itertools::Itertools; use masp_primitives::asset_type::AssetType; @@ -897,9 +898,7 @@ pub async fn to_ledger_vector< .collect(); let mut tv = LedgerVector { - blob: HEXLOWER.encode(&tx.try_to_vec().map_err(|_| { - Error::Other("unable to serialize transaction".to_string()) - })?), + blob: HEXLOWER.encode(&tx.serialize_to_vec()), index: 0, valid: true, name: "Custom 0".to_string(), diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..175a1a8643 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -2231,8 +2231,6 @@ where } fn proposal_to_vec(proposal: OnChainProposal) -> Result> { - proposal - .content - .try_to_vec() + borsh::to_vec(&proposal.content) .map_err(|e| Error::from(EncodingError::Conversion(e.to_string()))) } diff --git a/shared/src/sdk/wallet/keys.rs b/shared/src/sdk/wallet/keys.rs index 867a2b1ad0..44a0779d26 100644 --- a/shared/src/sdk/wallet/keys.rs +++ b/shared/src/sdk/wallet/keys.rs @@ -5,6 +5,7 @@ use std::marker::PhantomData; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use orion::{aead, kdf}; use serde::{Deserialize, Serialize}; @@ -201,9 +202,7 @@ impl EncryptedKeypair { let salt = encryption_salt(); let encryption_key = encryption_key(&salt, &password); - let data = keypair - .try_to_vec() - .expect("Serializing keypair shouldn't fail"); + let data = keypair.serialize_to_vec(); let encrypted_keypair = aead::seal(&encryption_key, &data) .expect("Encryption of data shouldn't fail"); diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..5b8133000b 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -5,6 +5,7 @@ use std::convert::TryInto; use std::num::TryFromIntError; 
use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::{GasMetering, TxGasMeter}; use namada_core::types::internal::KeyVal; use thiserror::Error; @@ -727,11 +728,10 @@ where tx_charge_gas(env, iter_gas + log_gas)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { - let key_val = KeyVal { + let key_val = borsh::to_vec(&KeyVal { key, val: value.clone(), - } - .try_to_vec() + }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -750,11 +750,10 @@ where continue; } Some(write_log::StorageModification::Temp { ref value }) => { - let key_val = KeyVal { + let key_val = borsh::to_vec(&KeyVal { key, val: value.clone(), - } - .try_to_vec() + }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -765,8 +764,7 @@ where return Ok(len); } None => { - let key_val = KeyVal { key, val } - .try_to_vec() + let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -995,7 +993,7 @@ where for event in write_log.get_ibc_events() { if event.event_type == event_type { let value = - event.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + borsh::to_vec(event).map_err(TxRuntimeError::EncodingError)?; let len: i64 = value .len() .try_into() @@ -1341,8 +1339,7 @@ where if let Some(iter) = iterators.get_mut(iter_id) { let gas_meter = unsafe { env.ctx.gas_meter.get() }; if let Some((key, val)) = vp_host_fns::iter_next(gas_meter, iter)? 
{ - let key_val = KeyVal { key, val } - .try_to_vec() + let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -1452,8 +1449,7 @@ where let code_hash = Hash::try_from(&code_hash[..]) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; let (addr, gas) = write_log.init_account(&storage.address_gen, code_hash); - let addr_bytes = - addr.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let addr_bytes = addr.serialize_to_vec(); tx_charge_gas(env, gas)?; let gas = env .memory @@ -1616,8 +1612,7 @@ where .map_err(TxRuntimeError::StorageError)?; Ok(match header { Some(h) => { - let value = - h.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let value = h.serialize_to_vec(); let len: i64 = value .len() .try_into() @@ -1692,9 +1687,7 @@ where vp_host_fns::add_gas(gas_meter, gas)?; Ok(match header { Some(h) => { - let value = h - .try_to_vec() - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let value = h.serialize_to_vec(); let len: i64 = value .len() .try_into() @@ -2391,7 +2384,7 @@ where H: StorageHasher, CA: WasmCacheAccess, { - let bytes = val.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let bytes = borsh::to_vec(val).map_err(TxRuntimeError::EncodingError)?; namada_core::ledger::ibc::IbcStorageContext::write(ctx, key, bytes)?; Ok(()) } diff --git a/shared/src/vm/wasm/memory.rs b/shared/src/vm/wasm/memory.rs index 3e0c7975c9..9b04160556 100644 --- a/shared/src/vm/wasm/memory.rs +++ b/shared/src/vm/wasm/memory.rs @@ -5,7 +5,7 @@ use std::ptr::NonNull; use std::str::Utf8Error; use std::sync::Arc; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::VM_MEMORY_ACCESS_GAS_PER_BYTE; use thiserror::Error; use wasmer::{ @@ -86,7 +86,7 @@ pub fn write_tx_inputs( tx_data: &Tx, ) -> Result { let tx_data_ptr = 0; - let tx_data_bytes = tx_data.try_to_vec().map_err(Error::EncodingError)?; + let tx_data_bytes = 
tx_data.serialize_to_vec(); let tx_data_len = tx_data_bytes.len() as _; write_memory_bytes(memory, tx_data_ptr, tx_data_bytes)?; @@ -129,20 +129,18 @@ pub fn write_vp_inputs( }: VpInput, ) -> Result { let addr_ptr = 0; - let addr_bytes = addr.try_to_vec().map_err(Error::EncodingError)?; + let addr_bytes = addr.serialize_to_vec(); let addr_len = addr_bytes.len() as _; - let data_bytes = data.try_to_vec().map_err(Error::EncodingError)?; + let data_bytes = data.serialize_to_vec(); let data_ptr = addr_ptr + addr_len; let data_len = data_bytes.len() as _; - let keys_changed_bytes = - keys_changed.try_to_vec().map_err(Error::EncodingError)?; + let keys_changed_bytes = keys_changed.serialize_to_vec(); let keys_changed_ptr = data_ptr + data_len; let keys_changed_len = keys_changed_bytes.len() as _; - let verifiers_bytes = - verifiers.try_to_vec().map_err(Error::EncodingError)?; + let verifiers_bytes = verifiers.serialize_to_vec(); let verifiers_ptr = keys_changed_ptr + keys_changed_len; let verifiers_len = verifiers_bytes.len() as _; diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 7678ceb434..9740469b95 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -552,7 +552,7 @@ fn get_gas_rules() -> wasm_instrument::gas_metering::ConstantCostRules { #[cfg(test)] mod tests { - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use itertools::Either; use namada_test_utils::TestWasms; use test_log::test; @@ -631,7 +631,7 @@ mod tests { let code_hash = Hash::sha256(&tx_code); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (tx_code.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_code.len() as u64).serialize_to_vec(); write_log.write(&key, tx_code.clone()).unwrap(); write_log.write(&len_key, code_len).unwrap(); @@ -640,7 +640,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let tx_data = 
2_usize.pow(23).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(23).serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -661,7 +661,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); @@ -700,7 +700,7 @@ mod tests { let code_hash = Hash::sha256(&vp_eval); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (vp_eval.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_eval.len() as u64).serialize_to_vec(); storage.write(&key, vp_eval).unwrap(); storage.write(&len_key, code_len).unwrap(); // This code will allocate memory of the given size @@ -709,7 +709,7 @@ mod tests { let limit_code_hash = Hash::sha256(&vp_memory_limit); let key = Key::wasm_code(&limit_code_hash); let len_key = Key::wasm_code_len(&limit_code_hash); - let code_len = (vp_memory_limit.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_memory_limit.len() as u64).serialize_to_vec(); storage.write(&key, vp_memory_limit).unwrap(); storage.write(&len_key, code_len).unwrap(); @@ -718,7 +718,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let input = 2_usize.pow(23).try_to_vec().unwrap(); + let input = 2_usize.pow(23).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_serialized_data(input); @@ -751,7 +751,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let input = 2_usize.pow(24).try_to_vec().unwrap(); + let input = 2_usize.pow(24).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_data(input); @@ 
-801,7 +801,7 @@ mod tests { let vp_code = TestWasms::VpMemoryLimit.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_code); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_code).unwrap(); @@ -812,7 +812,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(23).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -834,7 +834,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -869,7 +869,7 @@ mod tests { let code_hash = Hash::sha256(&tx_no_op); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (tx_no_op.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_no_op.len() as u64).serialize_to_vec(); write_log.write(&key, tx_no_op.clone()).unwrap(); write_log.write(&len_key, code_len).unwrap(); @@ -934,7 +934,7 @@ mod tests { let code_hash = Hash::sha256(&vp_code); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); storage.write(&key, vp_code).unwrap(); storage.write(&len_key, code_len).unwrap(); @@ -996,7 +996,7 @@ mod tests { let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); // store the wasm code let code_hash = 
Hash::sha256(&tx_read_key); - let code_len = (tx_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); write_log.write(&key, tx_read_key.clone()).unwrap(); @@ -1012,8 +1012,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let tx_data = key.try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let tx_data = key.serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -1053,7 +1053,7 @@ mod tests { let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_read_key); - let code_len = (vp_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_read_key).unwrap(); @@ -1069,8 +1069,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let tx_data = key.try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let tx_data = key.serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -1113,7 +1113,7 @@ mod tests { let vp_eval = TestWasms::VpEval.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_eval); - let code_len = (vp_eval.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_eval.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_eval).unwrap(); @@ -1122,7 +1122,7 @@ mod tests { let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code let read_code_hash = Hash::sha256(&vp_read_key); - let code_len = (vp_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&read_code_hash); let len_key = Key::wasm_code_len(&read_code_hash); storage.write(&key, vp_read_key).unwrap(); @@ -1138,8 +1138,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let input = 2_usize.pow(23).try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let input = 2_usize.pow(23).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_serialized_data(input); @@ -1216,7 +1216,7 @@ mod tests { // store the tx code let code_hash = Hash::sha256(&tx_code); - let code_len = (tx_code.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); write_log.write(&key, tx_code).unwrap(); @@ -1279,7 +1279,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // store the vp code let code_hash = Hash::sha256(&vp_code); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_code).unwrap(); diff --git a/tests/src/e2e/eth_bridge_tests/helpers.rs b/tests/src/e2e/eth_bridge_tests/helpers.rs index a52273366c..04047a1722 100644 --- a/tests/src/e2e/eth_bridge_tests/helpers.rs +++ b/tests/src/e2e/eth_bridge_tests/helpers.rs @@ -55,7 +55,7 @@ impl EventsEndpointClient { /// Sends an Ethereum event to the Namada node. Returns `Ok` iff the event /// was successfully sent. 
pub async fn send(&mut self, event: &EthereumEvent) -> Result<()> { - let event = event.try_to_vec()?; + let event = event.serialize_to_vec()?; let req = Request::builder() .method(Method::POST) diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 2f5bbe4ea7..eef959c834 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -16,7 +16,7 @@ use std::str::FromStr; use std::sync::Arc; use std::time::{Duration, Instant}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; use namada::types::address::Address; @@ -437,8 +437,7 @@ fn ledger_txs_and_queries() -> Result<()> { key: None, shielded: None, } - .try_to_vec() - .unwrap(); + .serialize_to_vec(); let tx_data_path = test.test_dir.path().join("tx.data"); std::fs::write(&tx_data_path, transfer).unwrap(); let tx_data_path = tx_data_path.to_string_lossy(); @@ -665,7 +664,7 @@ fn ledger_txs_and_queries() -> Result<()> { &validator_one_rpc, ], // expect hex encoded of borsh encoded bytes - HEXLOWER.encode(&christel_balance.try_to_vec().unwrap()), + HEXLOWER.encode(&christel_balance.serialize_to_vec()), ), ]; for (query_args, expected) in &query_args_and_expected_response { diff --git a/tests/src/e2e/multitoken_tests/helpers.rs b/tests/src/e2e/multitoken_tests/helpers.rs index 7ce90bcc87..0856691dd5 100644 --- a/tests/src/e2e/multitoken_tests/helpers.rs +++ b/tests/src/e2e/multitoken_tests/helpers.rs @@ -102,9 +102,9 @@ pub fn mint_red_tokens( test, TxWriteData { key: red_balance_key, - value: amount.try_to_vec()?, + value: amount.serialize_to_vec()?, } - .try_to_vec()?, + .serialize_to_vec()?, )?; let tx_data_path = tx_data_path.to_string_lossy().to_string(); diff --git a/tests/src/native_vp/eth_bridge_pool.rs b/tests/src/native_vp/eth_bridge_pool.rs index 364dcd074c..90452bc52f 100644 --- a/tests/src/native_vp/eth_bridge_pool.rs +++ b/tests/src/native_vp/eth_bridge_pool.rs @@ -2,7 +2,8 @@ mod 
test_bridge_pool_vp { use std::path::PathBuf; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; use namada::ledger::eth_bridge::{ wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeConfig, @@ -124,7 +125,7 @@ mod test_bridge_pool_vp { } fn create_tx(transfer: PendingTransfer, keypair: &common::SecretKey) -> Tx { - let data = transfer.try_to_vec().expect("Test failed"); + let data = transfer.serialize_to_vec(); let wasm_code = wasm_loader::read_wasm_or_exit(wasm_dir(), ADD_TRANSFER_WASM); diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index b8d88961ea..25f8d09fcf 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -85,7 +85,7 @@ use namada::types::token::{self, Amount, DenominatedAmount}; use namada::vm::{wasm, WasmCacheRwAccess}; use namada_core::ledger::gas::TxGasMeter; use namada_test_utils::TestWasms; -use namada_tx_prelude::BorshSerialize; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; use crate::tx::*; @@ -243,11 +243,11 @@ pub fn init_storage() -> (Address, Address) { tx_host_env::with(|env| { env.wl_storage .storage - .write(&denom_key, &token_denom.try_to_vec().unwrap()) + .write(&denom_key, &token_denom.serialize_to_vec()) .unwrap(); env.wl_storage .storage - .write(&key, &init_bal.try_to_vec().unwrap()) + .write(&key, &init_bal.serialize_to_vec()) .unwrap(); }); @@ -257,7 +257,7 @@ pub fn init_storage() -> (Address, Address) { min_num_of_blocks: 10, min_duration: DurationSecs(100), }; - let bytes = epoch_duration.try_to_vec().unwrap(); + let bytes = epoch_duration.serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage.storage.write(&key, &bytes).unwrap(); }); diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 68ebd76dff..e95513364e 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs 
@@ -21,6 +21,7 @@ mod tests { use std::collections::BTreeSet; use std::panic; + use borsh_ext::BorshSerializeExt; use itertools::Itertools; use namada::ibc::core::Msg; use namada::ledger::ibc::storage as ibc_storage; @@ -41,9 +42,7 @@ mod tests { use namada_test_utils::TestWasms; use namada_tx_prelude::address::InternalAddress; use namada_tx_prelude::chain::ChainId; - use namada_tx_prelude::{ - Address, BorshSerialize, StorageRead, StorageWrite, - }; + use namada_tx_prelude::{Address, StorageRead, StorageWrite}; use namada_vp_prelude::account::AccountPublicKeysMap; use namada_vp_prelude::VpEnv; use prost::Message; @@ -577,7 +576,7 @@ mod tests { // evaluating the VP template which always returns `true` should pass let code = TestWasms::VpAlwaysTrue.read_bytes(); let code_hash = Hash::sha256(&code); - let code_len = (code.len() as u64).try_to_vec().unwrap(); + let code_len = (code.len() as u64).serialize_to_vec(); vp_host_env::with(|env| { // store wasm codes let key = Key::wasm_code(&code_hash); @@ -600,7 +599,7 @@ mod tests { // pass let code = TestWasms::VpAlwaysFalse.read_bytes(); let code_hash = Hash::sha256(&code); - let code_len = (code.len() as u64).try_to_vec().unwrap(); + let code_len = (code.len() as u64).serialize_to_vec(); vp_host_env::with(|env| { // store wasm codes let key = Key::wasm_code(&code_hash); @@ -1250,20 +1249,18 @@ mod tests { let ibc_token = ibc_storage::ibc_token(&denom); let balance_key = token::balance_key(&ibc_token, &sender); let init_bal = Amount::from_u64(100); - writes.insert(balance_key.clone(), init_bal.try_to_vec().unwrap()); + writes.insert(balance_key.clone(), init_bal.serialize_to_vec()); let minted_key = token::minted_balance_key(&ibc_token); - writes.insert(minted_key.clone(), init_bal.try_to_vec().unwrap()); + writes.insert(minted_key.clone(), init_bal.serialize_to_vec()); let minter_key = token::minter_key(&ibc_token); writes.insert( minter_key, - Address::Internal(InternalAddress::Ibc) - .try_to_vec() - .unwrap(), + 
Address::Internal(InternalAddress::Ibc).serialize_to_vec(), ); // original denom let hash = ibc_storage::calc_hash(&denom); let denom_key = ibc_storage::ibc_denom_key(hash); - writes.insert(denom_key, denom.try_to_vec().unwrap()); + writes.insert(denom_key, denom.serialize_to_vec()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { env.wl_storage @@ -1505,8 +1502,7 @@ mod tests { ); let val = Amount::from_uint(100, ibc::ANY_DENOMINATION) .unwrap() - .try_to_vec() - .unwrap(); + .serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage .storage @@ -1608,7 +1604,7 @@ mod tests { denom, &address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_u64(100).try_to_vec().unwrap(); + let val = Amount::from_u64(100).serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage .storage diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 582b668b79..6bf46c96c1 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -18,7 +18,8 @@ use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::run::Error; use namada::vm::wasm::{self, TxCache, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; -use namada_tx_prelude::{storage_api, BorshSerialize, Ctx}; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; +use namada_tx_prelude::{storage_api, Ctx}; use namada_vp_prelude::key::common; use tempfile::TempDir; @@ -186,7 +187,7 @@ impl TestTxEnv { let storage_key = key::threshold_key(address); self.wl_storage .storage - .write(&storage_key, threshold.try_to_vec().unwrap()) + .write(&storage_key, threshold.serialize_to_vec()) .unwrap(); } @@ -216,7 +217,7 @@ impl TestTxEnv { let storage_key = token::balance_key(token, target); self.wl_storage .storage - .write(&storage_key, amount.try_to_vec().unwrap()) + .write(&storage_key, amount.serialize_to_vec()) .unwrap(); } diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 87d91e07a3..fc11d301a5 100644 --- 
a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -17,6 +17,8 @@ use core::slice; use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use borsh_ext; +use borsh_ext::BorshSerializeExt; pub use namada_core::ledger::eth_bridge; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters::storage as parameters_storage; @@ -248,7 +250,7 @@ impl TxEnv for Ctx { key: &storage::Key, val: T, ) -> Result<(), Error> { - let buf = val.try_to_vec().unwrap(); + let buf = val.serialize_to_vec(); self.write_bytes_temp(key, buf) } @@ -319,7 +321,7 @@ impl TxEnv for Ctx { } fn emit_ibc_event(&mut self, event: &ibc::IbcEvent) -> Result<(), Error> { - let event = BorshSerialize::try_to_vec(event).unwrap(); + let event = borsh::to_vec(event).unwrap(); unsafe { namada_tx_emit_ibc_event(event.as_ptr() as _, event.len() as _) }; diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 0962628363..9a9244c3a0 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -14,6 +14,8 @@ use std::convert::TryFrom; use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use borsh_ext; +use borsh_ext::BorshSerializeExt; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters; pub use namada_core::ledger::pgf::storage as pgf_storage; @@ -91,10 +93,10 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let targets = [*tx.data_sechash(), *tx.code_sechash()]; // Serialize parameters - let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); - let public_keys_map = public_keys_index_map.try_to_vec().unwrap(); - let targets = targets.try_to_vec().unwrap(); - let signer = owner.try_to_vec().unwrap(); + let max_signatures = max_signatures_per_transaction.serialize_to_vec(); + let public_keys_map = public_keys_index_map.serialize_to_vec(); + let targets = targets.serialize_to_vec(); + let 
signer = owner.serialize_to_vec(); let valid = unsafe { namada_vp_verify_tx_section_signature( @@ -305,7 +307,7 @@ impl<'view> VpEnv<'view> for Ctx { } fn eval(&self, vp_code_hash: Hash, input_data: Tx) -> Result { - let input_data_bytes = BorshSerialize::try_to_vec(&input_data).unwrap(); + let input_data_bytes = borsh::to_vec(&input_data).unwrap(); let result = unsafe { namada_vp_eval( vp_code_hash.0.as_ptr() as _, diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 3453747161..775e7d0e91 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -32,6 +32,7 @@ mod tests { arb_established_address, arb_non_internal_address, }; use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; @@ -106,7 +107,7 @@ mod tests { }); let tx_code = vec![]; - let tx_data = bond.try_to_vec().unwrap(); + let tx_data = bond.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index 88d757998f..7354fb654a 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -1,8 +1,9 @@ //! A tx for adding a transfer request across the Ethereum bridge //! into the bridge pool. 
-use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; use eth_bridge::storage::{bridge_pool, native_erc20_key}; use eth_bridge_pool::{GasFee, PendingTransfer, TransferToEthereum}; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::*; #[transaction(gas = 100000)] @@ -64,7 +65,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { log_string("Escrow succeeded"); // add transfer into the pool let pending_key = bridge_pool::get_pending_key(&transfer); - ctx.write_bytes(&pending_key, transfer.try_to_vec().unwrap()) + ctx.write_bytes(&pending_key, transfer.serialize_to_vec()) .wrap_err("Could not write transfer to bridge pool")?; Ok(()) } diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index c1e1b35226..581d9d8dcf 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -28,6 +28,7 @@ mod tests { use namada_tests::native_vp::TestNativeVpEnv; use namada_tests::tx::*; use namada_tx_prelude::address::testing::arb_established_address; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; @@ -85,7 +86,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let tx_code = vec![]; - let tx_data = commission_change.try_to_vec().unwrap(); + let tx_data = commission_change.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 7e08c0dcda..7ca5f46ebf 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -30,6 +30,7 @@ mod tests { use namada_tests::native_vp::TestNativeVpEnv; use namada_tests::tx::*; use namada_tx_prelude::address::InternalAddress; 
+ use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; @@ -125,7 +126,7 @@ mod tests { tx_host_env::commit_tx_and_block(); let tx_code = vec![]; - let tx_data = unbond.try_to_vec().unwrap(); + let tx_data = unbond.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index c8fa649c43..d9f47dba23 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -32,6 +32,7 @@ mod tests { arb_established_address, arb_non_internal_address, }; use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; @@ -169,7 +170,7 @@ mod tests { ); let tx_code = vec![]; - let tx_data = withdraw.try_to_vec().unwrap(); + let tx_data = withdraw.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) diff --git a/wasm/wasm_source/src/vp_masp.rs b/wasm/wasm_source/src/vp_masp.rs index cb66211118..b33d367c1c 100644 --- a/wasm/wasm_source/src/vp_masp.rs +++ b/wasm/wasm_source/src/vp_masp.rs @@ -4,6 +4,7 @@ use masp_primitives::asset_type::AssetType; use masp_primitives::transaction::components::I128Sum; /// Multi-asset shielded pool VP. 
use namada_vp_prelude::address::masp; +use namada_vp_prelude::borsh_ext::BorshSerializeExt; use namada_vp_prelude::storage::Epoch; use namada_vp_prelude::*; use ripemd::{Digest, Ripemd160}; @@ -16,9 +17,7 @@ fn asset_type_from_epoched_address( denom: token::MaspDenom, ) -> AssetType { // Timestamp the chosen token with the current epoch - let token_bytes = (token, denom, epoch.0) - .try_to_vec() - .expect("token should serialize"); + let token_bytes = (token, denom, epoch.0).serialize_to_vec(); // Generate the unique asset identifier from the unique token address AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") } @@ -208,10 +207,7 @@ fn validate_tx( transparent_tx_pool -= transp_amt; // Satisfies 4. - let target_enc = transfer - .target - .try_to_vec() - .expect("target address encoding"); + let target_enc = transfer.target.serialize_to_vec(); let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); @@ -267,7 +263,7 @@ fn validate_tx( _ => {} } // Do the expensive proof verification in the VM at the end. 
- ctx.verify_masp(shielded_tx.try_to_vec().unwrap()) + ctx.verify_masp(shielded_tx.serialize_to_vec()) } else { reject() } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 7298c0b126..088297ece8 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -118,6 +118,7 @@ mod tests { use namada_tests::vp::*; use namada_tx_prelude::{StorageWrite, TxEnv}; use namada_vp_prelude::account::AccountPublicKeysMap; + use namada_vp_prelude::borsh_ext::BorshSerializeExt; use namada_vp_prelude::key::RefTo; use proptest::prelude::*; use storage::testing::arb_account_storage_key_no_vp; @@ -376,7 +377,7 @@ mod tests { // Construct a PoW solution like a client would let challenge = testnet_pow::Challenge::new(&mut tx_env.wl_storage, &vp_owner, target.clone()).unwrap(); let solution = challenge.solve(); - let solution_bytes = solution.try_to_vec().unwrap(); + let solution_bytes = solution.serialize_to_vec(); let amount = token::DenominatedAmount { amount, From 134798e2d7ed00a6112cc59e1f0cd58f765fd3e6 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 15 Sep 2023 13:48:05 +0100 Subject: [PATCH 067/161] Add Cargo lock files Co-Authored-By: mariari --- Cargo.lock | 436 ++++++++++++++------------ wasm/Cargo.lock | 350 +++++++++++---------- wasm_for_tests/wasm_source/Cargo.lock | 346 +++++++++++--------- 3 files changed, 613 insertions(+), 519 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..9f443fb583 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -497,16 +485,16 @@ checksum = 
"cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", "crossbeam-channel 0.5.8", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "lazy_static", "log", "num_cpus", @@ -557,14 +545,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] @@ -679,7 +667,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -703,16 +691,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -722,20 +700,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = 
"bls12_381" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -743,10 +715,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -760,13 +733,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = "borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -785,10 +769,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.15", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -806,10 +805,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -961,6 +969,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -991,6 +1008,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1202,27 +1225,27 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.5.11" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f1885697ee8a177096d42f158922251a41973117f6d8a234cee94b9509157b7" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" dependencies = [ "backtrace", "color-spantrace", "eyre", "indenter", "once_cell", - "owo-colors 1.3.0", + "owo-colors", "tracing-error", ] [[package]] name = "color-spantrace" -version = "0.1.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6eee477a4a8a72f4addd4de416eb56d54bc307b284d6601bafdee1f4ea462d1" +checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" dependencies = [ "once_cell", - "owo-colors 1.3.0", + "owo-colors", "tracing-core 0.1.31", "tracing-error", ] @@ -1409,7 +1432,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits 0.2.15", "once_cell", "oorandom", @@ -1430,7 +1453,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -1545,16 +1568,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1892,9 +1905,9 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 
0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", @@ -2024,7 +2037,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -2409,14 +2422,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2443,23 +2456,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2557,12 +2560,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2795,25 +2798,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name 
= "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2833,7 +2825,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -3002,16 +2994,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -3338,10 +3320,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde 1.0.163", ] @@ -3421,7 +3403,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.1", + "rustix 0.37.13", "windows-sys 0.48.0", ] @@ -3434,6 +3416,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -3460,14 +3451,14 @@ dependencies = [ [[package]] name = "jubjub" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -3748,9 +3739,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3761,19 +3752,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3784,7 +3775,7 @@ dependencies = [ "num-traits 0.2.15", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", "zcash_encoding", ] @@ -3792,15 +3783,15 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - 
"group 0.12.1", - "itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3962,14 +3953,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.45.0", ] [[package]] @@ -4016,7 +4007,8 @@ dependencies = [ "async-trait", "base58 0.2.0", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "byte-unit", "circular-queue", "clru", @@ -4027,7 +4019,7 @@ dependencies = [ "ethers", "eyre", "futures", - "itertools", + "itertools 0.10.5", "libsecp256k1 0.7.0", "loupe", "masp_primitives", @@ -4038,7 +4030,7 @@ dependencies = [ "namada_test_utils", "num256", "orion", - "owo-colors 3.5.0", + "owo-colors", "parity-wasm", "parse_duration", "paste", @@ -4062,7 +4054,7 @@ dependencies = [ "tokio", "toml 0.5.9", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "wasm-instrument", "wasmer", "wasmer-cache", @@ -4088,7 +4080,8 @@ dependencies = [ "bimap", "bit-set", "blake2b-rs", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "byte-unit", "byteorder", "bytes", @@ -4109,7 +4102,7 @@ dependencies = [ "flate2", "futures", "git2", - "itertools", + "itertools 0.10.5", "lazy_static", "libc", "libloading", @@ -4157,7 +4150,7 @@ dependencies = [ "tracing 0.1.37", "tracing-appender", "tracing-log", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "warp", "winapi", "zeroize", @@ -4168,7 +4161,8 @@ name = "namada_benchmarks" version = "0.23.0" dependencies = [ "async-trait", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "criterion", "ferveo-common", "masp_primitives", @@ -4182,7 +4176,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "tokio", 
- "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4194,7 +4188,8 @@ dependencies = [ "ark-serialize", "assert_matches", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -4210,7 +4205,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", + "itertools 0.10.5", "libsecp256k1 0.7.0", "masp_primitives", "namada_macros", @@ -4238,7 +4233,7 @@ dependencies = [ "toml 0.5.9", "tonic-build", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "uint", "zeroize", ] @@ -4247,8 +4242,8 @@ dependencies = [ name = "namada_encoding_spec" version = "0.23.0" dependencies = [ - "borsh 0.9.4", - "itertools", + "borsh 1.0.0-alpha.4", + "itertools 0.10.5", "lazy_static", "madato", "namada", @@ -4259,12 +4254,13 @@ name = "namada_ethereum_bridge" version = "0.23.0" dependencies = [ "assert_matches", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "data-encoding", "ethabi", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -4291,10 +4287,10 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", - "itertools", + "itertools 0.10.5", "namada_core", "once_cell", "proptest", @@ -4302,14 +4298,14 @@ dependencies = [ "test-log", "thiserror", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "namada_test_utils" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -4320,7 +4316,8 @@ version = "0.23.0" dependencies = [ "assert_cmd", "async-trait", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "clap", "color-eyre", @@ -4333,7 +4330,7 @@ dependencies = [ "file-serve", "fs_extra", "hyper", - "itertools", + "itertools 0.10.5", "lazy_static", "namada", "namada_apps", @@ -4358,14 
+4355,15 @@ dependencies = [ "tokio", "toml 0.5.9", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "namada_tx_prelude" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "masp_primitives", "namada_core", "namada_macros", @@ -4379,7 +4377,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -4388,7 +4386,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -4687,9 +4686,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -4805,12 +4804,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "owo-colors" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2386b4ebe91c2f7f51082d4cefa145d030e33a1842a96b12e4885cc3c01f7a55" - [[package]] name = "owo-colors" version = "3.5.0" @@ -4819,11 +4812,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -4903,6 +4896,19 @@ dependencies = [ 
"subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -4920,11 +4926,11 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -5116,7 +5122,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", - "itertools", + "itertools 0.10.5", "predicates-core", ] @@ -5281,7 +5287,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -5302,7 +5308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -5478,21 +5484,36 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", "rand_core 0.6.4", "serde 1.0.163", 
"thiserror", "zeroize", ] +[[package]] +name = "redjubjub" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ + "rand_core 0.6.4", + "reddsa", + "serde 1.0.163", + "thiserror", + "zeroize", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -5780,16 +5801,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.1" +version = "0.37.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4790277f605573dd24b6751701e0823582a63c7cafc095e427e6c66e45dd75e" +checksum = "f79bef90eb6d984c72722595b5b1348ab39275a5e5123faca6863bf07d75a4e0" dependencies = [ "bitflags 1.2.1", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.7", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6367,10 +6388,10 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ "blake2b-rs", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -6500,6 +6521,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -6552,7 +6585,7 @@ dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.1", + "rustix 0.37.13", "windows-sys 0.45.0", ] @@ -7160,7 +7193,7 @@ checksum = 
"09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel 0.5.8", "time", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -7204,12 +7237,12 @@ dependencies = [ [[package]] name = "tracing-error" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4d7c0b83d4a500748fa5879461652b361edf5c9d51ede2a2ac03875ca185e24" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing 0.1.37", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -7252,17 +7285,6 @@ dependencies = [ "tracing-core 0.1.31", ] -[[package]] -name = "tracing-subscriber" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" -dependencies = [ - "sharded-slab", - "thread_local", - "tracing-core 0.1.31", -] - [[package]] name = "tracing-subscriber" version = "0.3.17" @@ -8292,8 +8314,8 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = "0.2.0" +source = "git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 5dbe91c1e4..584eddc9ab 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -377,15 +365,15 @@ checksum = 
"cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -411,14 +399,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] @@ -517,7 +505,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -541,16 +529,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -560,20 +538,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls12_381" -version = 
"0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -581,10 +553,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -598,13 +571,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = "borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -623,10 +607,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.16", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -644,10 +643,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -761,6 +769,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -779,6 +796,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1200,16 +1223,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1507,9 +1520,9 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", @@ -1627,7 +1640,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -1985,14 +1998,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2019,23 +2032,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2085,12 +2088,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2296,25 +2299,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2334,7 +2326,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -2493,16 +2485,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -2804,10 +2786,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde", ] @@ -2900,6 +2882,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -2917,14 +2908,14 @@ dependencies = [ [[package]] name = "jubjub" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -3101,9 +3092,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3114,19 +3105,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3137,7 +3128,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", "zcash_encoding", ] @@ -3145,15 +3136,15 @@ dependencies = [ [[package]] name = 
"masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - "group 0.12.1", - "itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3315,7 +3306,8 @@ version = "0.23.0" dependencies = [ "async-trait", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "circular-queue", "clru", "data-encoding", @@ -3325,7 +3317,7 @@ dependencies = [ "ethers", "eyre", "futures", - "itertools", + "itertools 0.10.5", "loupe", "masp_primitives", "masp_proofs", @@ -3376,7 +3368,8 @@ dependencies = [ "ark-ec", "ark-serialize", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -3392,7 +3385,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", + "itertools 0.10.5", "libsecp256k1 0.7.0", "masp_primitives", "namada_macros", @@ -3425,10 +3418,11 @@ dependencies = [ name = "namada_ethereum_bridge" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3454,7 +3448,7 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", "namada_core", @@ -3468,7 +3462,7 @@ dependencies = [ name = "namada_test_utils" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -3507,7 +3501,8 @@ dependencies = [ name = "namada_tx_prelude" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", 
"masp_primitives", "namada_core", "namada_macros", @@ -3521,7 +3516,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -3530,7 +3525,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3543,7 +3539,7 @@ dependencies = [ name = "namada_wasm" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "masp_primitives", "namada", @@ -3768,9 +3764,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3835,11 +3831,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -3919,6 +3915,19 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -3936,11 +3945,11 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -4209,7 +4218,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -4230,7 +4239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -4397,21 +4406,36 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", "rand_core 0.6.4", "serde", "thiserror", "zeroize", ] +[[package]] +name = "redjubjub" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ + "rand_core 0.6.4", + "reddsa", + "serde", + "thiserror", + "zeroize", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -5171,9 +5195,9 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = 
"git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5297,6 +5321,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "tap" version = "1.0.1" @@ -5767,7 +5803,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" name = "tx_template" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_tests", "namada_tx_prelude", @@ -5890,7 +5926,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" name = "vp_template" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_tests", "namada_vp_prelude", @@ -6670,8 +6706,8 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = "0.2.0" +source = "git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 8e3bc2bb20..10c899ecae 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - 
"cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -377,15 +365,15 @@ checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -411,14 +399,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] @@ -517,7 +505,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -541,16 +529,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -560,20 +538,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls12_381" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -581,10 +553,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -598,13 +571,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = "borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -623,10 +607,25 @@ dependencies = [ "syn 1.0.109", ] 
+[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.16", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -644,10 +643,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -761,6 +769,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -779,6 +796,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = 
"0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1200,16 +1223,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1507,9 +1520,9 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", @@ -1627,7 +1640,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -1985,14 +1998,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2019,23 +2032,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2085,12 +2088,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" 
+version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2296,25 +2299,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2334,7 +2326,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -2493,16 +2485,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -2804,10 +2786,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde", ] @@ -2900,6 +2882,15 @@ dependencies 
= [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -2917,14 +2908,14 @@ dependencies = [ [[package]] name = "jubjub" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -3101,9 +3092,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3114,19 +3105,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3137,7 +3128,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", 
"zcash_encoding", ] @@ -3145,15 +3136,15 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=449a7295fe24d96456ece24c223ca9eb76b0e6ba#449a7295fe24d96456ece24c223ca9eb76b0e6ba" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - "group 0.12.1", - "itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3315,7 +3306,8 @@ version = "0.23.0" dependencies = [ "async-trait", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "circular-queue", "clru", "data-encoding", @@ -3325,7 +3317,7 @@ dependencies = [ "ethers", "eyre", "futures", - "itertools", + "itertools 0.10.5", "loupe", "masp_primitives", "masp_proofs", @@ -3376,7 +3368,8 @@ dependencies = [ "ark-ec", "ark-serialize", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -3392,7 +3385,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", + "itertools 0.10.5", "libsecp256k1 0.7.0", "masp_primitives", "namada_macros", @@ -3425,10 +3418,11 @@ dependencies = [ name = "namada_ethereum_bridge" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3454,7 +3448,7 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", "namada_core", @@ -3468,7 +3462,7 @@ dependencies = [ name = "namada_test_utils" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -3507,7 +3501,8 @@ dependencies = [ name = "namada_tx_prelude" version = "0.23.0" 
dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "masp_primitives", "namada_core", "namada_macros", @@ -3521,7 +3516,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -3530,7 +3525,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3543,7 +3539,7 @@ dependencies = [ name = "namada_wasm_for_tests" version = "0.23.0" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_test_utils", "namada_tests", @@ -3761,9 +3757,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3828,11 +3824,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -3912,6 +3908,19 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -3929,11 +3938,11 @@ dependencies = [ [[package]] 
name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -4202,7 +4211,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -4223,7 +4232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -4390,21 +4399,36 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", "rand_core 0.6.4", "serde", "thiserror", "zeroize", ] +[[package]] +name = "redjubjub" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ + "rand_core 0.6.4", + "reddsa", + "serde", + "thiserror", + "zeroize", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -5164,9 +5188,9 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = 
"git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5290,6 +5314,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "tap" version = "1.0.1" @@ -6641,8 +6677,8 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = "0.2.0" +source = "git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", From bbe3a675e164445a1d9cccb615f63a135395cef1 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 25 Sep 2023 11:43:20 +0100 Subject: [PATCH 068/161] Fix unit test --- ethereum_bridge/src/protocol/transactions/update.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethereum_bridge/src/protocol/transactions/update.rs b/ethereum_bridge/src/protocol/transactions/update.rs index 69530c0315..d14c20ec35 100644 --- a/ethereum_bridge/src/protocol/transactions/update.rs +++ b/ethereum_bridge/src/protocol/transactions/update.rs @@ -53,7 +53,7 @@ mod tests { fn test_value() -> Result<()> { let key = storage::Key::parse("some arbitrary key") .expect("could not set up test"); - let value = 21u64; + let value = 21i32; let mut wl_storage = TestWlStorage::default(); let serialized = value.serialize_to_vec(); wl_storage From f0a9720cdcb9b9a2d7542799cf76b99b829b4dd2 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 25 Sep 2023 11:44:30 +0100 Subject: [PATCH 069/161] 
Changelog for #1930 --- .changelog/unreleased/improvements/1930-update-borsh.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changelog/unreleased/improvements/1930-update-borsh.md diff --git a/.changelog/unreleased/improvements/1930-update-borsh.md b/.changelog/unreleased/improvements/1930-update-borsh.md new file mode 100644 index 0000000000..e7fb8b994e --- /dev/null +++ b/.changelog/unreleased/improvements/1930-update-borsh.md @@ -0,0 +1 @@ +- Migrate to upstream borsh ([\#1930](https://github.com/anoma/namada/pull/1930)) \ No newline at end of file From ac2535a4b148e5a248b179a0bbb19496bc1ec030 Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 6 Oct 2023 14:13:35 +0200 Subject: [PATCH 070/161] add changelog --- .changelog/unreleased/improvements/1917-ibc-shielded-actions.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1917-ibc-shielded-actions.md diff --git a/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md b/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md new file mode 100644 index 0000000000..d9ec6743d0 --- /dev/null +++ b/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md @@ -0,0 +1,2 @@ +- IBC transfer to a payment address + ([\#1917](https://github.com/anoma/namada/issues/1917)) \ No newline at end of file From b1bc8450eca547827010532d50375a23c80e75a3 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 4 Oct 2023 06:57:00 +0200 Subject: [PATCH 071/161] Reintegrated generic IO support. 
--- apps/src/bin/namada-client/main.rs | 1 + apps/src/bin/namada-relayer/main.rs | 3 +- apps/src/bin/namada-wallet/main.rs | 2 +- apps/src/lib/cli.rs | 1 + apps/src/lib/cli/api.rs | 6 +- apps/src/lib/cli/client.rs | 305 +++-- apps/src/lib/cli/context.rs | 16 +- apps/src/lib/cli/relayer.rs | 52 +- apps/src/lib/cli/utils.rs | 3 +- apps/src/lib/cli/wallet.rs | 261 ++-- apps/src/lib/client/rpc.rs | 1135 +++++++++-------- apps/src/lib/client/tx.rs | 474 ++++--- .../lib/node/ledger/shell/testing/client.rs | 6 +- .../lib/node/ledger/shell/testing/utils.rs | 16 +- apps/src/lib/wallet/mod.rs | 4 +- benches/lib.rs | 23 +- shared/src/ledger/eth_bridge.rs | 14 +- shared/src/ledger/eth_bridge/bridge_pool.rs | 270 ++-- shared/src/ledger/eth_bridge/validator_set.rs | 83 +- shared/src/ledger/mod.rs | 104 +- shared/src/sdk/args.rs | 64 +- shared/src/sdk/masp.rs | 84 +- shared/src/sdk/rpc.rs | 125 +- shared/src/sdk/signing.rs | 122 +- shared/src/sdk/tx.rs | 383 +++--- shared/src/types/io.rs | 46 +- 26 files changed, 1782 insertions(+), 1821 deletions(-) diff --git a/apps/src/bin/namada-client/main.rs b/apps/src/bin/namada-client/main.rs index 9b43ca8f91..167674f65e 100644 --- a/apps/src/bin/namada-client/main.rs +++ b/apps/src/bin/namada-client/main.rs @@ -16,6 +16,7 @@ async fn main() -> Result<()> { CliApi::::handle_client_command::( None, cli::namada_client_cli()?, + &CliIo, ) .await } diff --git a/apps/src/bin/namada-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs index 05d2620bcb..ef5e05f913 100644 --- a/apps/src/bin/namada-relayer/main.rs +++ b/apps/src/bin/namada-relayer/main.rs @@ -14,5 +14,6 @@ async fn main() -> Result<()> { let cmd = cli::namada_relayer_cli()?; // run the CLI - CliApi::::handle_relayer_command::(None, cmd).await + CliApi::::handle_relayer_command::(None, cmd, &CliIo) + .await } diff --git a/apps/src/bin/namada-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs index 5e94831716..987e9d2699 100644 --- a/apps/src/bin/namada-wallet/main.rs +++ 
b/apps/src/bin/namada-wallet/main.rs @@ -6,5 +6,5 @@ pub fn main() -> Result<()> { color_eyre::install()?; let (cmd, ctx) = cli::namada_wallet_cli()?; // run the CLI - CliApi::::handle_wallet_command(cmd, ctx) + CliApi::::handle_wallet_command(cmd, ctx, &CliIo) } diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 531027a102..ce5bf600b5 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2534,6 +2534,7 @@ pub mod args { use super::context::*; use super::utils::*; use super::{ArgGroup, ArgMatches}; + use crate::cli::context::FromContext; use crate::config::{self, Action, ActionAtHeight}; use crate::facade::tendermint::Timeout; use crate::facade::tendermint_config::net::Address as TendermintAddress; diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index bb387c5d9a..052a834f55 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -13,7 +13,7 @@ use crate::client::utils; #[async_trait::async_trait(?Send)] pub trait CliClient: Client + Sync { fn from_tendermint_address(address: &mut TendermintAddress) -> Self; - async fn wait_until_node_is_synced(&self) -> Halt<()>; + async fn wait_until_node_is_synced(&self, io: &impl Io) -> Halt<()>; } #[async_trait::async_trait(?Send)] @@ -22,8 +22,8 @@ impl CliClient for HttpClient { HttpClient::new(utils::take_config_address(address)).unwrap() } - async fn wait_until_node_is_synced(&self) -> Halt<()> { - wait_until_node_is_synched::<_, IO>(self).await + async fn wait_until_node_is_synced(&self, io: &impl Io) -> Halt<()> { + wait_until_node_is_synched(self, io).await } } diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 449d4f38ce..ac1ca1e34d 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,11 +1,9 @@ use color_eyre::eyre::{eyre, Report, Result}; - -use namada::sdk::tx::dump_tx; +use namada::ledger::{Namada, NamadaImpl}; use namada::sdk::signing; +use namada::sdk::tx::dump_tx; use namada::types::control_flow::ProceedOrElse; use 
namada::types::io::Io; -use namada::ledger::NamadaImpl; -use namada::ledger::Namada; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -21,6 +19,7 @@ impl CliApi { pub async fn handle_client_command( client: Option, cmd: cli::NamadaClient, + io: &IO, ) -> Result<()> where C: CliClient, @@ -38,19 +37,22 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_custom::<_, IO>(&client, &mut ctx, args) - .await?; + tx::submit_custom(&namada, args).await?; if !dry_run { - crate::wallet::save(&ctx.wallet) + namada + .wallet() + .await + .save() .unwrap_or_else(|err| eprintln!("{}", err)); } else { - IO::println( + io.println( "Transaction dry run. No addresses have been \ saved.", ) @@ -63,12 +65,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_transfer::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_transfer(&namada, args).await?; } Sub::TxIbcTransfer(TxIbcTransfer(mut args)) => { let client = client.unwrap_or_else(|| { @@ -77,12 +79,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_ibc_transfer::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_ibc_transfer(&namada, args).await?; } Sub::TxUpdateAccount(TxUpdateAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -91,14 +93,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_update_account::<_, IO>( - &client, &mut 
ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_update_account(&namada, args).await?; } Sub::TxInitAccount(TxInitAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -107,21 +107,22 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_init_account::<_, IO>( - &client, &mut ctx, args, - ) - .await?; + tx::submit_init_account(&namada, args).await?; if !dry_run { - crate::wallet::save(&ctx.wallet) + namada + .wallet() + .await + .save() .unwrap_or_else(|err| eprintln!("{}", err)); } else { - IO::println( + io.println( "Transaction dry run. No addresses have been \ saved.", ) @@ -134,12 +135,22 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_init_validator::<_, IO>(&client, ctx, args) - .await?; + let namada = NamadaImpl::new( + &client, + &mut ctx.wallet, + &mut ctx.shielded, + io, + ); + tx::submit_init_validator( + &namada, + &mut ctx.config, + args, + ) + .await?; } Sub::TxInitProposal(TxInitProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -148,12 +159,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_init_proposal::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_init_proposal(&namada, args).await?; } Sub::TxVoteProposal(TxVoteProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -162,12 +173,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - 
tx::submit_vote_proposal::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_vote_proposal(&namada, args).await?; } Sub::TxRevealPk(TxRevealPk(mut args)) => { let client = client.unwrap_or_else(|| { @@ -176,12 +187,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_reveal_pk::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_reveal_pk(&namada, args).await?; } Sub::Bond(Bond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -190,12 +201,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_bond::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_bond(&namada, args).await?; } Sub::Unbond(Unbond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -204,12 +215,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_unbond::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_unbond(&namada, args).await?; } Sub::Withdraw(Withdraw(mut args)) => { let client = client.unwrap_or_else(|| { @@ -218,12 +229,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_withdraw::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_withdraw(&namada, args).await?; } Sub::TxCommissionRateChange(TxCommissionRateChange( mut args, @@ -234,14 +245,13 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await 
.proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_validator_commission_change::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_validator_commission_change(&namada, args) + .await?; } // Eth bridge Sub::AddToEthBridgePool(args) => { @@ -252,25 +262,19 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); let tx_args = args.tx.clone(); - - let namada = NamadaImpl::new( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - ); - let (mut tx, signing_data, _epoch) = args.clone().build(&namada).await?; signing::generate_test_vector(&namada, &tx).await?; if args.tx.dump_tx { - dump_tx::(&args.tx, tx); + dump_tx::(io, &args.tx, tx); } else { tx::submit_reveal_aux( &namada, @@ -293,14 +297,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_unjail_validator::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_unjail_validator(&namada, args).await?; } Sub::TxUpdateStewardCommission( TxUpdateStewardCommission(mut args), @@ -311,14 +313,13 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_update_steward_commission::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_update_steward_commission(&namada, args) + .await?; } Sub::TxResignSteward(TxResignSteward(mut args)) => { let client = client.unwrap_or_else(|| { @@ -327,12 +328,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - 
tx::submit_resign_steward::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_resign_steward(&namada, args).await?; } // Ledger queries Sub::QueryEpoch(QueryEpoch(mut args)) => { @@ -340,10 +341,11 @@ impl CliApi { C::from_tendermint_address(&mut args.ledger_address) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; - rpc::query_and_print_epoch::<_, IO>(&client).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_epoch(&namada).await; } Sub::QueryValidatorState(QueryValidatorState(mut args)) => { let client = client.unwrap_or_else(|| { @@ -352,16 +354,13 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_validator_state::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_validator_state(&namada, args) + .await; } Sub::QueryTransfers(QueryTransfers(mut args)) => { let client = client.unwrap_or_else(|| { @@ -370,17 +369,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_transfers::<_, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_transfers(&namada, args).await; } Sub::QueryConversions(QueryConversions(mut args)) => { let client = client.unwrap_or_else(|| { @@ -389,26 +383,23 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_conversions::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_conversions(&namada, args).await; } 
Sub::QueryBlock(QueryBlock(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; - rpc::query_block::<_, IO>(&client).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_block(&namada).await; } Sub::QueryBalance(QueryBalance(mut args)) => { let client = client.unwrap_or_else(|| { @@ -417,17 +408,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_balance::<_, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_balance(&namada, args).await; } Sub::QueryBonds(QueryBonds(mut args)) => { let client = client.unwrap_or_else(|| { @@ -436,17 +422,14 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_bonds::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await - .expect("expected successful query of bonds"); + let namada = ctx.to_sdk(&client, io); + rpc::query_bonds(&namada, args) + .await + .expect("expected successful query of bonds"); } Sub::QueryBondedStake(QueryBondedStake(mut args)) => { let client = client.unwrap_or_else(|| { @@ -455,11 +438,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_bonded_stake::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_bonded_stake(&namada, args).await; } Sub::QueryCommissionRate(QueryCommissionRate(mut args)) => { let client = client.unwrap_or_else(|| { @@ -468,16 +452,13 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) 
.await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_commission_rate::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_commission_rate(&namada, args) + .await; } Sub::QuerySlashes(QuerySlashes(mut args)) => { let client = client.unwrap_or_else(|| { @@ -486,16 +467,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_slashes::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_slashes(&namada, args).await; } Sub::QueryDelegations(QueryDelegations(mut args)) => { let client = client.unwrap_or_else(|| { @@ -504,16 +481,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_delegations::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_delegations(&namada, args).await; } Sub::QueryFindValidator(QueryFindValidator(mut args)) => { let client = client.unwrap_or_else(|| { @@ -522,11 +495,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_find_validator::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_find_validator(&namada, args).await; } Sub::QueryResult(QueryResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -535,11 +509,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_result::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_result(&namada, args).await; } 
Sub::QueryRawBytes(QueryRawBytes(mut args)) => { let client = client.unwrap_or_else(|| { @@ -548,11 +523,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_raw_bytes::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_raw_bytes(&namada, args).await; } Sub::QueryProposal(QueryProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -561,11 +537,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_proposal(&namada, args).await; } Sub::QueryProposalResult(QueryProposalResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -574,12 +551,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal_result::<_, IO>(&client, args) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_proposal_result(&namada, args).await; } Sub::QueryProtocolParameters(QueryProtocolParameters( mut args, @@ -590,12 +567,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_protocol_parameters::<_, IO>(&client, args) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_protocol_parameters(&namada, args).await; } Sub::QueryPgf(QueryPgf(mut args)) => { let client = client.unwrap_or_else(|| { @@ -604,11 +581,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_pgf::<_, IO>(&client, args).await; + let namada = 
ctx.to_sdk(&client, io); + rpc::query_pgf(&namada, args).await; } Sub::QueryAccount(QueryAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -617,11 +595,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_account::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_account(&namada, args).await; } Sub::SignTx(SignTx(mut args)) => { let client = client.unwrap_or_else(|| { @@ -630,11 +609,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::sign_tx::<_, IO>(&client, &mut ctx, args).await?; + let namada = ctx.to_sdk(&client, io); + tx::sign_tx(&namada, args).await?; } } } @@ -668,11 +648,12 @@ impl CliApi { let client = C::from_tendermint_address(&mut ledger_address); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::epoch_sleep::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::epoch_sleep(&namada, args).await; } }, } diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index f2efec7fee..4772ef98b9 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -6,9 +6,10 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; +use namada::ledger::{Namada, NamadaImpl}; +use namada::sdk::masp::fs::FsShieldedUtils; use namada::sdk::masp::ShieldedContext; use namada::sdk::wallet::Wallet; -use namada::sdk::masp::fs::FsShieldedUtils; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; @@ -150,6 +151,19 @@ impl Context { }) } + /// Make an implementation of Namada from this object and parameters. 
+ pub fn to_sdk<'a, C, IO>( + &'a mut self, + client: &'a C, + io: &'a IO, + ) -> impl Namada + where + C: namada::ledger::queries::Client + Sync, + IO: Io, + { + NamadaImpl::new(client, &mut self.wallet, &mut self.shielded, io) + } + /// Parse and/or look-up the value from the context. pub fn get(&self, from_context: &FromContext) -> T where diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index 3322e84e2f..d94fd5a09d 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -19,6 +19,7 @@ impl CliApi { pub async fn handle_relayer_command( client: Option, cmd: cli::NamadaRelayer, + io: &IO, ) -> Result<()> where C: CliClient, @@ -36,11 +37,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - bridge_pool::recommend_batch::<_, IO>(&client, args) + let namada = ctx.to_sdk(&client, io); + bridge_pool::recommend_batch(&namada, args) .await .proceed_or_else(error)?; } @@ -56,11 +58,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - bridge_pool::construct_proof::<_, IO>(&client, args) + bridge_pool::construct_proof(&client, io, args) .await .proceed_or_else(error)?; } @@ -71,7 +73,7 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let eth_client = Arc::new( @@ -79,8 +81,8 @@ impl CliApi { .unwrap(), ); let args = args.to_sdk_ctxless(); - bridge_pool::relay_bridge_pool_proof::<_, _, IO>( - eth_client, &client, args, + bridge_pool::relay_bridge_pool_proof( + eth_client, &client, io, args, ) .await .proceed_or_else(error)?; @@ -92,10 +94,10 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; - 
bridge_pool::query_bridge_pool::<_, IO>(&client).await; + bridge_pool::query_bridge_pool(&client, io).await; } EthBridgePoolWithoutCtx::QuerySigned( QuerySignedBridgePool(mut query), @@ -104,10 +106,10 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; - bridge_pool::query_signed_bridge_pool::<_, IO>(&client) + bridge_pool::query_signed_bridge_pool(&client, io) .await .proceed_or_else(error)?; } @@ -118,10 +120,10 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; - bridge_pool::query_relay_progress::<_, IO>(&client).await; + bridge_pool::query_relay_progress(&client, io).await; } }, cli::NamadaRelayer::ValidatorSet(sub) => match sub { @@ -134,12 +136,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - validator_set::query_bridge_validator_set::<_, IO>( - &client, args, + validator_set::query_bridge_validator_set( + &client, io, args, ) .await; } @@ -152,12 +154,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - validator_set::query_governnace_validator_set::<_, IO>( - &client, args, + validator_set::query_governnace_validator_set( + &client, io, args, ) .await; } @@ -170,12 +172,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + .wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - validator_set::query_validator_set_update_proof::<_, IO>( - &client, args, + validator_set::query_validator_set_update_proof( + &client, io, args, ) .await; } @@ -188,7 +190,7 @@ impl CliApi { ) }); client - .wait_until_node_is_synced::() + 
.wait_until_node_is_synced(io) .await .proceed_or_else(error)?; let eth_client = Arc::new( @@ -196,8 +198,8 @@ impl CliApi { .unwrap(), ); let args = args.to_sdk_ctxless(); - validator_set::relay_validator_set_update::<_, _, IO>( - eth_client, &client, args, + validator_set::relay_validator_set_update( + eth_client, &client, io, args, ) .await .proceed_or_else(error)?; diff --git a/apps/src/lib/cli/utils.rs b/apps/src/lib/cli/utils.rs index 26cc38ff7f..7c8bc4100c 100644 --- a/apps/src/lib/cli/utils.rs +++ b/apps/src/lib/cli/utils.rs @@ -8,8 +8,9 @@ use clap::{ArgAction, ArgMatches}; use color_eyre::eyre::Result; use super::args; -use super::context::{Context, FromContext}; +use super::context::Context; use crate::cli::api::CliIo; +use crate::cli::context::FromContext; // We only use static strings pub type App = clap::Command; diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 33b443edd9..5dc223cd64 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -8,11 +8,15 @@ use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; +use namada::sdk::wallet::{ + DecryptionError, FindKeyError, GenRestoreKeyError, Wallet, WalletIo, + WalletStorage, +}; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; use namada::{display, display_line, edisplay_line}; +use rand::RngCore; use rand_core::OsRng; use crate::cli; @@ -25,61 +29,62 @@ impl CliApi { pub fn handle_wallet_command( cmd: cmds::NamadaWallet, mut ctx: Context, + io: &impl Io, ) -> Result<()> { match cmd { cmds::NamadaWallet::Key(sub) => match sub { cmds::WalletKey::Restore(cmds::KeyRestore(args)) => { - key_and_address_restore::(ctx, args) + key_and_address_restore(&mut ctx.wallet, io, args) } cmds::WalletKey::Gen(cmds::KeyGen(args)) => { - 
key_and_address_gen::(ctx, args) + key_and_address_gen(&mut ctx.wallet, io, &mut OsRng, args) } cmds::WalletKey::Find(cmds::KeyFind(args)) => { - key_find::(ctx, args) + key_find(&mut ctx.wallet, io, args) } cmds::WalletKey::List(cmds::KeyList(args)) => { - key_list::(ctx, args) + key_list(&mut ctx.wallet, io, args) } cmds::WalletKey::Export(cmds::Export(args)) => { - key_export::(ctx, args) + key_export(&mut ctx.wallet, io, args) } }, cmds::NamadaWallet::Address(sub) => match sub { cmds::WalletAddress::Gen(cmds::AddressGen(args)) => { - key_and_address_gen::(ctx, args) + key_and_address_gen(&mut ctx.wallet, io, &mut OsRng, args) } cmds::WalletAddress::Restore(cmds::AddressRestore(args)) => { - key_and_address_restore::(ctx, args) + key_and_address_restore(&mut ctx.wallet, io, args) } cmds::WalletAddress::Find(cmds::AddressOrAliasFind(args)) => { - address_or_alias_find::(ctx, args) + address_or_alias_find(&mut ctx.wallet, io, args) } cmds::WalletAddress::List(cmds::AddressList) => { - address_list::(ctx) + address_list(&mut ctx.wallet, io) } cmds::WalletAddress::Add(cmds::AddressAdd(args)) => { - address_add::(ctx, args) + address_add(&mut ctx.wallet, io, args) } }, cmds::NamadaWallet::Masp(sub) => match sub { cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => { - spending_key_gen::(ctx, args) + spending_key_gen(&mut ctx.wallet, io, args) } cmds::WalletMasp::GenPayAddr(cmds::MaspGenPayAddr(args)) => { let args = args.to_sdk(&mut ctx); - payment_address_gen::(ctx, args) + payment_address_gen(&mut ctx.wallet, io, args) } cmds::WalletMasp::AddAddrKey(cmds::MaspAddAddrKey(args)) => { - address_key_add::(ctx, args) + address_key_add(&mut ctx.wallet, io, args) } cmds::WalletMasp::ListPayAddrs(cmds::MaspListPayAddrs) => { - payment_addresses_list::(ctx) + payment_addresses_list(&mut ctx.wallet, io) } cmds::WalletMasp::ListKeys(cmds::MaspListKeys(args)) => { - spending_keys_list::(ctx, args) + spending_keys_list(&mut ctx.wallet, io, args) } 
cmds::WalletMasp::FindAddrKey(cmds::MaspFindAddrKey(args)) => { - address_key_find::(ctx, args) + address_key_find(&mut ctx.wallet, io, args) } }, } @@ -88,35 +93,35 @@ impl CliApi { } /// Find shielded address or key -fn address_key_find( - ctx: Context, +fn address_key_find( + wallet: &mut Wallet, + io: &impl Io, args::AddrKeyFind { alias, unsafe_show_secret, }: args::AddrKeyFind, ) { - let mut wallet = ctx.wallet; let alias = alias.to_lowercase(); if let Ok(viewing_key) = wallet.find_viewing_key(&alias) { // Check if alias is a viewing key - display_line!(IO, "Viewing key: {}", viewing_key); + display_line!(io, "Viewing key: {}", viewing_key); if unsafe_show_secret { // Check if alias is also a spending key match wallet.find_spending_key(&alias, None) { Ok(spending_key) => { - display_line!(IO, "Spending key: {}", spending_key) + display_line!(io, "Spending key: {}", spending_key) } Err(FindKeyError::KeyNotFound) => {} - Err(err) => edisplay_line!(IO, "{}", err), + Err(err) => edisplay_line!(io, "{}", err), } } } else if let Some(payment_addr) = wallet.find_payment_addr(&alias) { // Failing that, check if alias is a payment address - display_line!(IO, "Payment address: {}", payment_addr); + display_line!(io, "Payment address: {}", payment_addr); } else { // Otherwise alias cannot be referring to any shielded value display_line!( - IO, + io, "No shielded address or key with alias {} found. Use the commands \ `masp list-addrs` and `masp list-keys` to see all the known \ addresses and keys.", @@ -126,44 +131,44 @@ fn address_key_find( } /// List spending keys. -fn spending_keys_list( - ctx: Context, +fn spending_keys_list( + wallet: &mut Wallet, + io: &impl Io, args::MaspKeysList { decrypt, unsafe_show_secret, }: args::MaspKeysList, ) { - let wallet = ctx.wallet; let known_view_keys = wallet.get_viewing_keys(); let known_spend_keys = wallet.get_spending_keys(); if known_view_keys.is_empty() { display_line!( - IO, + io, "No known keys. 
Try `masp add --alias my-addr --value ...` to add \ a new key to the wallet.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known keys:").unwrap(); + display_line!(io, &mut w; "Known keys:").unwrap(); for (alias, key) in known_view_keys { - display!(IO, &mut w; " Alias \"{}\"", alias).unwrap(); + display!(io, &mut w; " Alias \"{}\"", alias).unwrap(); let spending_key_opt = known_spend_keys.get(&alias); // If this alias is associated with a spending key, indicate whether // or not the spending key is encrypted // TODO: consider turning if let into match if let Some(spending_key) = spending_key_opt { if spending_key.is_encrypted() { - display_line!(IO, &mut w; " (encrypted):") + display_line!(io, &mut w; " (encrypted):") } else { - display_line!(IO, &mut w; " (not encrypted):") + display_line!(io, &mut w; " (not encrypted):") } .unwrap(); } else { - display_line!(IO, &mut w; ":").unwrap(); + display_line!(io, &mut w; ":").unwrap(); } // Always print the corresponding viewing key - display_line!(IO, &mut w; " Viewing Key: {}", key).unwrap(); + display_line!(io, &mut w; " Viewing Key: {}", key).unwrap(); // A subset of viewing keys will have corresponding spending keys. // Print those too if they are available and requested. if unsafe_show_secret { @@ -172,7 +177,7 @@ fn spending_keys_list( // Here the spending key is unencrypted or successfully // decrypted Ok(spending_key) => { - display_line!(IO, + display_line!(io, &mut w; " Spending key: {}", spending_key, ) @@ -186,7 +191,7 @@ fn spending_keys_list( // Here the key is encrypted but incorrect password has // been provided Err(err) => { - display_line!(IO, + display_line!(io, &mut w; " Couldn't decrypt the spending key: {}", err, @@ -201,49 +206,52 @@ fn spending_keys_list( } /// List payment addresses. 
-fn payment_addresses_list(ctx: Context) { - let wallet = ctx.wallet; +fn payment_addresses_list( + wallet: &mut Wallet, + io: &impl Io, +) { let known_addresses = wallet.get_payment_addrs(); if known_addresses.is_empty() { display_line!( - IO, + io, "No known payment addresses. Try `masp gen-addr --alias my-addr` \ to generate a new payment address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known payment addresses:").unwrap(); + display_line!(io, &mut w; "Known payment addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - display_line!(IO, &mut w; " \"{}\": {}", alias, address).unwrap(); + display_line!(io, &mut w; " \"{}\": {}", alias, address).unwrap(); } } } /// Generate a spending key. -fn spending_key_gen( - ctx: Context, +fn spending_key_gen( + wallet: &mut Wallet, + io: &impl Io, args::MaspSpendKeyGen { alias, alias_force, unsafe_dont_encrypt, }: args::MaspSpendKeyGen, ) { - let mut wallet = ctx.wallet; let alias = alias.to_lowercase(); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet.gen_spending_key(alias, password, alias_force); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully added a spending key with alias: \"{}\"", alias ); } /// Generate a shielded payment address from the given key. 
-fn payment_address_gen( - ctx: Context, +fn payment_address_gen( + wallet: &mut Wallet, + io: &impl Io, args::MaspPayAddrGen { alias, alias_force, @@ -257,7 +265,6 @@ fn payment_address_gen( let payment_addr = viewing_key .to_payment_address(div) .expect("a PaymentAddress"); - let mut wallet = ctx.wallet; let alias = wallet .insert_payment_addr( alias, @@ -265,20 +272,21 @@ fn payment_address_gen( alias_force, ) .unwrap_or_else(|| { - edisplay_line!(IO, "Payment address not added"); + edisplay_line!(io, "Payment address not added"); cli::safe_exit(1); }); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully generated a payment address with the following alias: {}", alias, ); } /// Add a viewing key, spending key, or payment address to wallet. -fn address_key_add( - mut ctx: Context, +fn address_key_add( + wallet: &mut Wallet, + io: &impl Io, args::MaspAddrKeyAdd { alias, alias_force, @@ -289,11 +297,10 @@ fn address_key_add( let alias = alias.to_lowercase(); let (alias, typ) = match value { MaspValue::FullViewingKey(viewing_key) => { - let alias = ctx - .wallet + let alias = wallet .insert_viewing_key(alias, viewing_key, alias_force) .unwrap_or_else(|| { - edisplay_line!(IO, "Viewing key not added"); + edisplay_line!(io, "Viewing key not added"); cli::safe_exit(1); }); (alias, "viewing key") @@ -301,8 +308,7 @@ fn address_key_add( MaspValue::ExtendedSpendingKey(spending_key) => { let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let alias = ctx - .wallet + let alias = wallet .encrypt_insert_spending_key( alias, spending_key, @@ -310,25 +316,24 @@ fn address_key_add( alias_force, ) .unwrap_or_else(|| { - edisplay_line!(IO, "Spending key not added"); + edisplay_line!(io, "Spending key not added"); cli::safe_exit(1); }); (alias, "spending key") } MaspValue::PaymentAddress(payment_addr) => { - let alias = ctx - .wallet + 
let alias = wallet .insert_payment_addr(alias, payment_addr, alias_force) .unwrap_or_else(|| { - edisplay_line!(IO, "Payment address not added"); + edisplay_line!(io, "Payment address not added"); cli::safe_exit(1); }); (alias, "payment address") } }; - crate::wallet::save(&ctx.wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully added a {} with the following alias to wallet: {}", typ, alias, @@ -337,8 +342,9 @@ fn address_key_add( /// Restore a keypair and an implicit address from the mnemonic code in the /// wallet. -fn key_and_address_restore( - ctx: Context, +fn key_and_address_restore( + wallet: &mut Wallet, + io: &impl Io, args::KeyAndAddressRestore { scheme, alias, @@ -347,7 +353,6 @@ fn key_and_address_restore( derivation_path, }: args::KeyAndAddressRestore, ) { - let mut wallet = ctx.wallet; let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet @@ -360,17 +365,18 @@ fn key_and_address_restore( encryption_password, ) .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); cli::safe_exit(1) }) .unwrap_or_else(|| { - display_line!(IO, "No changes are persisted. Exiting."); + display_line!(io, "No changes are persisted. Exiting."); cli::safe_exit(0); }); - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", alias ); @@ -378,8 +384,10 @@ fn key_and_address_restore( /// Generate a new keypair and derive implicit address from it and store them in /// the wallet. 
-fn key_and_address_gen( - ctx: Context, +fn key_and_address_gen( + wallet: &mut Wallet>, + io: &impl Io, + rng: &mut R, args::KeyAndAddressGen { scheme, alias, @@ -388,12 +396,9 @@ fn key_and_address_gen( derivation_path, }: args::KeyAndAddressGen, ) { - let mut wallet = ctx.wallet; let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let mut rng = OsRng; - let derivation_path_and_mnemonic_rng = - derivation_path.map(|p| (p, &mut rng)); + let derivation_path_and_mnemonic_rng = derivation_path.map(|p| (p, rng)); let (alias, _key, _mnemonic) = wallet .gen_key( scheme, @@ -413,18 +418,20 @@ fn key_and_address_gen( cli::safe_exit(1); } }); - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", alias ); } /// Find a keypair in the wallet store. -fn key_find( - ctx: Context, +fn key_find( + wallet: &mut Wallet, + io: &impl Io, args::KeyFind { public_key, alias, @@ -432,7 +439,6 @@ fn key_find( unsafe_show_secret, }: args::KeyFind, ) { - let mut wallet = ctx.wallet; let found_keypair = match public_key { Some(pk) => wallet.find_key_by_pk(&pk, None), None => { @@ -440,7 +446,7 @@ fn key_find( match alias { None => { edisplay_line!( - IO, + io, "An alias, public key or public key hash needs to be \ supplied", ); @@ -453,62 +459,62 @@ fn key_find( match found_keypair { Ok(keypair) => { let pkh: PublicKeyHash = (&keypair.ref_to()).into(); - display_line!(IO, "Public key hash: {}", pkh); - display_line!(IO, "Public key: {}", keypair.ref_to()); + display_line!(io, "Public key hash: {}", pkh); + display_line!(io, "Public key: {}", keypair.ref_to()); if unsafe_show_secret { - display_line!(IO, "Secret key: {}", keypair); + display_line!(io, "Secret key: {}", keypair); } } Err(err) => { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); } 
} } /// List all known keys. -fn key_list( - ctx: Context, +fn key_list( + wallet: &mut Wallet, + io: &impl Io, args::KeyList { decrypt, unsafe_show_secret, }: args::KeyList, ) { - let wallet = ctx.wallet; let known_keys = wallet.get_keys(); if known_keys.is_empty() { display_line!( - IO, + io, "No known keys. Try `key gen --alias my-key` to generate a new \ key.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known keys:").unwrap(); + display_line!(io, &mut w; "Known keys:").unwrap(); for (alias, (stored_keypair, pkh)) in known_keys { let encrypted = if stored_keypair.is_encrypted() { "encrypted" } else { "not encrypted" }; - display_line!(IO, + display_line!(io, &mut w; " Alias \"{}\" ({}):", alias, encrypted, ) .unwrap(); if let Some(pkh) = pkh { - display_line!(IO, &mut w; " Public key hash: {}", pkh) + display_line!(io, &mut w; " Public key hash: {}", pkh) .unwrap(); } match stored_keypair.get::(decrypt, None) { Ok(keypair) => { - display_line!(IO, + display_line!(io, &mut w; " Public key: {}", keypair.ref_to(), ) .unwrap(); if unsafe_show_secret { - display_line!(IO, + display_line!(io, &mut w; " Secret key: {}", keypair, ) @@ -519,7 +525,7 @@ fn key_list( continue; } Err(err) => { - display_line!(IO, + display_line!(io, &mut w; " Couldn't decrypt the keypair: {}", err, ) @@ -531,11 +537,11 @@ fn key_list( } /// Export a keypair to a file. 
-fn key_export( - ctx: Context, +fn key_export( + wallet: &mut Wallet, + io: &impl Io, args::KeyExport { alias }: args::KeyExport, ) { - let mut wallet = ctx.wallet; wallet .find_key(alias.to_lowercase(), None) .map(|keypair| { @@ -546,30 +552,32 @@ fn key_export( let mut file = File::create(&file_name).unwrap(); file.write_all(file_data.as_ref()).unwrap(); - display_line!(IO, "Exported to file {}", file_name); + display_line!(io, "Exported to file {}", file_name); }) .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); cli::safe_exit(1) }) } /// List all known addresses. -fn address_list(ctx: Context) { - let wallet = ctx.wallet; +fn address_list( + wallet: &mut Wallet, + io: &impl Io, +) { let known_addresses = wallet.get_addresses(); if known_addresses.is_empty() { display_line!( - IO, + io, "No known addresses. Try `address gen --alias my-addr` to \ generate a new implicit address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known addresses:").unwrap(); + display_line!(io, &mut w; "Known addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - display_line!(IO, + display_line!(io, &mut w; " \"{}\": {}", alias, address.to_pretty_string(), ) @@ -579,8 +587,11 @@ fn address_list(ctx: Context) { } /// Find address (alias) by its alias (address). 
-fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { - let wallet = ctx.wallet; +fn address_or_alias_find( + wallet: &mut Wallet, + io: &impl Io, + args: args::AddressOrAliasFind, +) { if args.address.is_some() && args.alias.is_some() { panic!( "This should not be happening: clap should emit its own error \ @@ -589,10 +600,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } else if args.alias.is_some() { if let Some(address) = wallet.find_address(args.alias.as_ref().unwrap()) { - display_line!(IO, "Found address {}", address.to_pretty_string()); + display_line!(io, "Found address {}", address.to_pretty_string()); } else { display_line!( - IO, + io, "No address with alias {} found. Use the command `address \ list` to see all the known addresses.", args.alias.unwrap().to_lowercase() @@ -600,10 +611,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } } else if args.address.is_some() { if let Some(alias) = wallet.find_alias(args.address.as_ref().unwrap()) { - display_line!(IO, "Found alias {}", alias); + display_line!(io, "Found alias {}", alias); } else { display_line!( - IO, + io, "No alias with address {} found. Use the command `address \ list` to see all the known addresses.", args.address.unwrap() @@ -613,8 +624,11 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } /// Add an address to the wallet. 
-fn address_add(ctx: Context, args: args::AddressAdd) { - let mut wallet = ctx.wallet; +fn address_add( + wallet: &mut Wallet, + io: &impl Io, + args: args::AddressAdd, +) { if wallet .add_address( args.alias.clone().to_lowercase(), @@ -623,13 +637,14 @@ fn address_add(ctx: Context, args: args::AddressAdd) { ) .is_none() { - edisplay_line!(IO, "Address not added"); + edisplay_line!(io, "Address not added"); cli::safe_exit(1); } - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", args.alias.to_lowercase() ); diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 72d6514c35..24b7708f42 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -33,17 +33,16 @@ use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{CommissionPair, PosParams, Slash}; use namada::ledger::queries::RPC; use namada::ledger::storage::ConversionState; +use namada::ledger::Namada; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; use namada::sdk::error; use namada::sdk::error::{is_pinned_error, Error, PinnedBalanceError}; -use namada::sdk::masp::{ - Conversions, MaspAmount, MaspChange, ShieldedContext, ShieldedUtils, -}; +use namada::sdk::masp::{Conversions, MaspAmount, MaspChange}; use namada::sdk::rpc::{ self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, TxResponse, }; -use namada::sdk::wallet::{AddressVpType, Wallet}; +use namada::sdk::wallet::AddressVpType; use namada::types::address::{masp, Address}; use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; @@ -59,46 +58,37 @@ use tokio::time::Instant; use crate::cli::{self, args}; use crate::facade::tendermint::merkle::proof::Proof; use crate::facade::tendermint_rpc::error::Error as TError; -use 
crate::wallet::CliWalletUtils; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. -pub async fn query_tx_status< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_tx_status<'a>( + namada: &impl Namada<'a>, status: namada::sdk::rpc::TxEventQuery<'_>, deadline: Instant, ) -> Event { - rpc::query_tx_status::<_, IO>(client, status, deadline) + rpc::query_tx_status(namada, status, deadline) .await .proceed() } /// Query and print the epoch of the last committed block -pub async fn query_and_print_epoch< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, -) -> Epoch { - let epoch = rpc::query_epoch(client).await.unwrap(); - display_line!(IO, "Last committed epoch: {}", epoch); +pub async fn query_and_print_epoch<'a>(context: &impl Namada<'a>) -> Epoch { + let epoch = rpc::query_epoch(context.client()).await.unwrap(); + display_line!(context.io(), "Last committed epoch: {}", epoch); epoch } /// Query the last committed block -pub async fn query_block( - client: &C, -) { - let block = namada::sdk::rpc::query_block(client).await.unwrap(); +pub async fn query_block<'a>(context: &impl Namada<'a>) { + let block = namada::sdk::rpc::query_block(context.client()) + .await + .unwrap(); match block { Some(block) => { display_line!( - IO, + context.io(), "Last committed block ID: {}, height: {}, time: {}", block.hash, block.height, @@ -106,7 +96,7 @@ pub async fn query_block( ); } None => { - display_line!(IO, "No block has been committed yet."); + display_line!(context.io(), "No block has been committed yet."); } } } @@ -122,26 +112,22 @@ pub async fn query_results( } /// Query the specified accepted transfers from the ledger -pub async fn query_transfers< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn 
query_transfers<'a>( + context: &impl Namada<'a>, args: args::QueryTransfers, ) { let query_token = args.token; + let wallet = context.wallet().await; let query_owner = args.owner.map_or_else( || Either::Right(wallet.get_addresses().into_values().collect()), Either::Left, ); + let mut shielded = context.shielded_mut().await; let _ = shielded.load().await; // Obtain the effects of all shielded and transparent transactions let transfers = shielded .query_tx_deltas( - client, + context.client(), &query_owner, &query_token, &wallet.get_viewing_keys(), @@ -174,7 +160,7 @@ pub async fn query_transfers< // transaction's reception let amt = shielded .compute_exchanged_amount( - client, + context, amt, epoch, Conversions::new(), @@ -182,7 +168,8 @@ pub async fn query_transfers< .await .unwrap() .0; - let dec = shielded.decode_amount(client, amt, epoch).await; + let dec = + shielded.decode_amount(context.client(), amt, epoch).await; shielded_accounts.insert(acc, dec); } // Check if this transfer pertains to the supplied token @@ -205,7 +192,7 @@ pub async fn query_transfers< continue; } display_line!( - IO, + context.io(), "Height: {}, Index: {}, Transparent Transfer:", height, idx @@ -213,7 +200,7 @@ pub async fn query_transfers< // Display the transparent changes first for (account, MaspChange { ref asset, change }) in tfer_delta { if account != masp() { - display!(IO, " {}:", account); + display!(context.io(), " {}:", account); let token_alias = wallet.lookup_alias(asset); let sign = match change.cmp(&Change::zero()) { Ordering::Greater => "+", @@ -221,26 +208,22 @@ pub async fn query_transfers< Ordering::Equal => "", }; display!( - IO, + context.io(), " {}{} {}", sign, - format_denominated_amount( - client, - asset, - change.into(), - ) - .await, + format_denominated_amount(context, asset, change.into(),) + .await, token_alias ); } - display_line!(IO, ""); + display_line!(context.io(), ""); } // Then display the shielded changes afterwards // TODO: turn this to a display 
impl // (account, amt) for (account, masp_change) in shielded_accounts { if fvk_map.contains_key(&account) { - display!(IO, " {}:", fvk_map[&account]); + display!(context.io(), " {}:", fvk_map[&account]); for (token_addr, val) in masp_change { let token_alias = wallet.lookup_alias(&token_addr); let sign = match val.cmp(&Change::zero()) { @@ -249,11 +232,11 @@ pub async fn query_transfers< Ordering::Equal => "", }; display!( - IO, + context.io(), " {}{} {}", sign, format_denominated_amount( - client, + context, &token_addr, val.into(), ) @@ -261,113 +244,102 @@ pub async fn query_transfers< token_alias, ); } - display_line!(IO, ""); + display_line!(context.io(), ""); } } } } /// Query the raw bytes of given storage key -pub async fn query_raw_bytes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_raw_bytes<'a, N: Namada<'a>>( + context: &N, args: args::QueryRawBytes, ) { - let response = unwrap_client_response::( + let response = unwrap_client_response::( RPC.shell() - .storage_value(client, None, None, false, &args.storage_key) + .storage_value( + context.client(), + None, + None, + false, + &args.storage_key, + ) .await, ); if !response.data.is_empty() { - display_line!(IO, "Found data: 0x{}", HEXLOWER.encode(&response.data)); + display_line!( + context.io(), + "Found data: 0x{}", + HEXLOWER.encode(&response.data) + ); } else { - display_line!(IO, "No data found for key {}", args.storage_key); + display_line!( + context.io(), + "No data found for key {}", + args.storage_key + ); } } /// Query token balance(s) -pub async fn query_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { // Query the balances of shielded or transparent account types depending on // the CLI arguments match &args.owner { 
Some(BalanceOwner::FullViewingKey(_viewing_key)) => { - query_shielded_balance::<_, _, IO>(client, wallet, shielded, args) - .await + query_shielded_balance(context, args).await } Some(BalanceOwner::Address(_owner)) => { - query_transparent_balance::<_, IO>(client, wallet, args).await + query_transparent_balance(context, args).await } Some(BalanceOwner::PaymentAddress(_owner)) => { - query_pinned_balance::<_, _, IO>(client, wallet, shielded, args) - .await + query_pinned_balance(context, args).await } None => { // Print pinned balance - query_pinned_balance::<_, _, IO>( - client, - wallet, - shielded, - args.clone(), - ) - .await; + query_pinned_balance(context, args.clone()).await; // Print shielded balance - query_shielded_balance::<_, _, IO>( - client, - wallet, - shielded, - args.clone(), - ) - .await; + query_shielded_balance(context, args.clone()).await; // Then print transparent balance - query_transparent_balance::<_, IO>(client, wallet, args).await; + query_transparent_balance(context, args).await; } }; } /// Query token balance(s) -pub async fn query_transparent_balance< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn query_transparent_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { let prefix = Key::from( Address::Internal(namada::types::address::InternalAddress::Multitoken) .to_db_key(), ); - let tokens = wallet.tokens_with_aliases(); + let tokens = context.wallet().await.tokens_with_aliases(); match (args.token, args.owner) { (Some(token), Some(owner)) => { let balance_key = token::balance_key(&token, &owner.address().unwrap()); - let token_alias = wallet.lookup_alias(&token); - match query_storage_value::(client, &balance_key) - .await + let token_alias = context.wallet().await.lookup_alias(&token); + match query_storage_value::<_, token::Amount>( + context.client(), + &balance_key, + ) + .await { Ok(balance) => { - let balance = format_denominated_amount( - client, 
&token, balance, - ) - .await; - display_line!(IO, "{}: {}", token_alias, balance); + let balance = + format_denominated_amount(context, &token, balance) + .await; + display_line!(context.io(), "{}: {}", token_alias, balance); } Err(e) => { - display_line!(IO, "Eror in querying: {e}"); + display_line!(context.io(), "Eror in querying: {e}"); display_line!( - IO, + context.io(), "No {} balance found for {}", token_alias, owner @@ -378,56 +350,40 @@ pub async fn query_transparent_balance< (None, Some(owner)) => { let owner = owner.address().unwrap(); for (token_alias, token) in tokens { - let balance = get_token_balance(client, &token, &owner).await; + let balance = + get_token_balance(context.client(), &token, &owner).await; if !balance.is_zero() { - let balance = format_denominated_amount( - client, &token, balance, - ) - .await; - display_line!(IO, "{}: {}", token_alias, balance); + let balance = + format_denominated_amount(context, &token, balance) + .await; + display_line!(context.io(), "{}: {}", token_alias, balance); } } } (Some(token), None) => { let prefix = token::balance_prefix(&token); let balances = - query_storage_prefix::(client, &prefix) - .await; + query_storage_prefix::(context, &prefix).await; if let Some(balances) = balances { - print_balances::<_, IO>( - client, - wallet, - balances, - Some(&token), - None, - ) - .await; + print_balances(context, balances, Some(&token), None).await; } } (None, None) => { - let balances = - query_storage_prefix::(client, &prefix) - .await; + let balances = query_storage_prefix(context, &prefix).await; if let Some(balances) = balances { - print_balances::<_, IO>(client, wallet, balances, None, None) - .await; + print_balances(context, balances, None, None).await; } } } } /// Query the token pinned balance(s) -pub async fn query_pinned_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn 
query_pinned_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { // Map addresses to token names + let wallet = context.wallet().await; let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); let owners = if let Some(pa) = args.owner.and_then(|x| x.payment_address()) { @@ -445,7 +401,7 @@ pub async fn query_pinned_balance< .values() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - let _ = shielded.load().await; + let _ = context.shielded_mut().await.load().await; // Print the token balances by payment address let pinned_error = Err(Error::from(PinnedBalanceError::InvalidViewingKey)); for owner in owners { @@ -453,8 +409,10 @@ pub async fn query_pinned_balance< // Find the viewing key that can recognize payments the current payment // address for vk in &viewing_keys { - balance = shielded - .compute_exchanged_pinned_balance::<_, IO>(client, owner, vk) + balance = context + .shielded_mut() + .await + .compute_exchanged_pinned_balance(context, owner, vk) .await; if !is_pinned_error(&balance) { break; @@ -463,18 +421,21 @@ pub async fn query_pinned_balance< // If a suitable viewing key was not found, then demand it from the user if is_pinned_error(&balance) { let vk_str = - prompt!(IO, "Enter the viewing key for {}: ", owner).await; + prompt!(context.io(), "Enter the viewing key for {}: ", owner) + .await; let fvk = match ExtendedViewingKey::from_str(vk_str.trim()) { Ok(fvk) => fvk, _ => { - edisplay_line!(IO, "Invalid viewing key entered"); + edisplay_line!(context.io(), "Invalid viewing key entered"); continue; } }; let vk = ExtendedFullViewingKey::from(fvk).fvk.vk; // Use the given viewing key to decrypt pinned transaction data - balance = shielded - .compute_exchanged_pinned_balance::<_, IO>(client, owner, &vk) + balance = context + .shielded_mut() + .await + .compute_exchanged_pinned_balance(context, owner, &vk) .await } @@ -482,7 +443,7 @@ pub async fn query_pinned_balance< match (balance, args.token.as_ref()) 
{ (Err(Error::Pinned(PinnedBalanceError::InvalidViewingKey)), _) => { display_line!( - IO, + context.io(), "Supplied viewing key cannot decode transactions to given \ payment address." ) @@ -492,13 +453,17 @@ pub async fn query_pinned_balance< _, ) => { display_line!( - IO, + context.io(), "Payment address {} has not yet been consumed.", owner ) } (Err(other), _) => { - display_line!(IO, "Error in Querying Pinned balance {}", other) + display_line!( + context.io(), + "Error in Querying Pinned balance {}", + other + ) } (Ok((balance, epoch)), Some(token)) => { let token_alias = wallet.lookup_alias(token); @@ -510,7 +475,7 @@ pub async fn query_pinned_balance< if total_balance.is_zero() { display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. \ Received no shielded {}", owner, @@ -519,13 +484,13 @@ pub async fn query_pinned_balance< ); } else { let formatted = format_denominated_amount( - client, + context, token, total_balance.into(), ) .await; display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. \ Received {} {}", owner, @@ -544,7 +509,7 @@ pub async fn query_pinned_balance< { if !found_any { display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. \ Received:", owner, @@ -553,7 +518,7 @@ pub async fn query_pinned_balance< found_any = true; } let formatted = format_denominated_amount( - client, + context, token_addr, (*value).into(), ) @@ -562,11 +527,16 @@ pub async fn query_pinned_balance< .get(token_addr) .map(|a| a.to_string()) .unwrap_or_else(|| token_addr.to_string()); - display_line!(IO, " {}: {}", token_alias, formatted,); + display_line!( + context.io(), + " {}: {}", + token_alias, + formatted, + ); } if !found_any { display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. 
\ Received no shielded assets.", owner, @@ -578,15 +548,15 @@ pub async fn query_pinned_balance< } } -async fn print_balances( - client: &C, - wallet: &Wallet, +async fn print_balances<'a>( + context: &impl Namada<'a>, balances: impl Iterator, token: Option<&Address>, target: Option<&Address>, ) { let stdout = io::stdout(); let mut w = stdout.lock(); + let wallet = context.wallet().await; let mut print_num = 0; let mut print_token = None; @@ -599,8 +569,7 @@ async fn print_balances( owner.clone(), format!( ": {}, owned by {}", - format_denominated_amount(client, tok, balance) - .await, + format_denominated_amount(context, tok, balance).await, wallet.lookup_alias(owner) ), ), @@ -629,19 +598,20 @@ async fn print_balances( } _ => { let token_alias = wallet.lookup_alias(&t); - display_line!(IO, &mut w; "Token {}", token_alias).unwrap(); + display_line!(context.io(), &mut w; "Token {}", token_alias) + .unwrap(); print_token = Some(t); } } // Print the balance - display_line!(IO, &mut w; "{}", s).unwrap(); + display_line!(context.io(), &mut w; "{}", s).unwrap(); print_num += 1; } if print_num == 0 { match (token, target) { (Some(_), Some(target)) | (None, Some(target)) => display_line!( - IO, + context.io(), &mut w; "No balances owned by {}", wallet.lookup_alias(target) @@ -649,38 +619,38 @@ async fn print_balances( .unwrap(), (Some(token), None) => { let token_alias = wallet.lookup_alias(token); - display_line!(IO, &mut w; "No balances for token {}", token_alias).unwrap() + display_line!(context.io(), &mut w; "No balances for token {}", token_alias).unwrap() + } + (None, None) => { + display_line!(context.io(), &mut w; "No balances").unwrap() } - (None, None) => display_line!(IO, &mut w; "No balances").unwrap(), } } } /// Query Proposals -pub async fn query_proposal< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_proposal<'a>( + context: &impl Namada<'a>, args: args::QueryProposal, ) { - let current_epoch = 
query_and_print_epoch::<_, IO>(client).await; + let current_epoch = query_and_print_epoch(context).await; if let Some(id) = args.proposal_id { - let proposal = query_proposal_by_id(client, id).await.unwrap(); + let proposal = + query_proposal_by_id(context.client(), id).await.unwrap(); if let Some(proposal) = proposal { display_line!( - IO, + context.io(), "{}", proposal.to_string_with_status(current_epoch) ); } else { - edisplay_line!(IO, "No proposal found with id: {}", id); + edisplay_line!(context.io(), "No proposal found with id: {}", id); } } else { let last_proposal_id_key = governance_storage::get_counter_key(); - let last_proposal_id = - query_storage_value::(client, &last_proposal_id_key) + let last_proposal_id: u64 = + query_storage_value(context.client(), &last_proposal_id_key) .await .unwrap(); @@ -690,14 +660,14 @@ pub async fn query_proposal< 0 }; - display_line!(IO, "id: {}", last_proposal_id); + display_line!(context.io(), "id: {}", last_proposal_id); for id in from_id..last_proposal_id { - let proposal = query_proposal_by_id(client, id) + let proposal = query_proposal_by_id(context.client(), id) .await .unwrap() .expect("Proposal should be written to storage."); - display_line!(IO, "{}", proposal); + display_line!(context.io(), "{}", proposal); } } } @@ -711,14 +681,8 @@ pub async fn query_proposal_by_id( } /// Query token shielded balance(s) -pub async fn query_shielded_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_shielded_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { // Used to control whether balances for all keys or a specific key are @@ -730,20 +694,32 @@ pub async fn query_shielded_balance< // provided, then convert to a viewing key first. 
let viewing_keys = match owner { Some(viewing_key) => vec![viewing_key], - None => wallet.get_viewing_keys().values().copied().collect(), + None => context + .wallet() + .await + .get_viewing_keys() + .values() + .copied() + .collect(), }; - let _ = shielded.load().await; - let fvks: Vec<_> = viewing_keys - .iter() - .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) - .collect(); - shielded.fetch(client, &[], &fvks).await.unwrap(); - // Save the update state so that future fetches can be short-circuited - let _ = shielded.save().await; + { + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + let fvks: Vec<_> = viewing_keys + .iter() + .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) + .collect(); + shielded.fetch(context.client(), &[], &fvks).await.unwrap(); + // Save the update state so that future fetches can be short-circuited + let _ = shielded.save().await; + } // The epoch is required to identify timestamped tokens - let epoch = query_and_print_epoch::<_, IO>(client).await; + let epoch = query_and_print_epoch(context).await; // Map addresses to token names - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let tokens = context + .wallet() + .await + .get_addresses_with_vp_type(AddressVpType::Token); match (args.token, owner.is_some()) { // Here the user wants to know the balance for a specific token (Some(token), true) => { @@ -751,24 +727,24 @@ pub async fn query_shielded_balance< let viewing_key = ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; let balance: MaspAmount = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) + context + .shielded_mut() + .await + .compute_shielded_balance(context.client(), &viewing_key) .await .unwrap() .expect("context should contain viewing key") } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) + context + .shielded_mut() + .await + .compute_exchanged_balance(context, 
&viewing_key, epoch) .await .unwrap() .expect("context should contain viewing key") }; - let token_alias = wallet.lookup_alias(&token); + let token_alias = context.wallet().await.lookup_alias(&token); let total_balance = balance .get(&(epoch, token.clone())) @@ -776,17 +752,17 @@ pub async fn query_shielded_balance< .unwrap_or_default(); if total_balance.is_zero() { display_line!( - IO, + context.io(), "No shielded {} balance found for given key", token_alias ); } else { display_line!( - IO, + context.io(), "{}: {}", token_alias, format_denominated_amount( - client, + context, &token, token::Amount::from(total_balance) ) @@ -802,18 +778,21 @@ pub async fn query_shielded_balance< // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; let balance = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) + context + .shielded_mut() + .await + .compute_shielded_balance( + context.client(), + &viewing_key, + ) .await .unwrap() .expect("context should contain viewing key") } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) + context + .shielded_mut() + .await + .compute_exchanged_balance(context, &viewing_key, epoch) .await .unwrap() .expect("context should contain viewing key") @@ -836,7 +815,7 @@ pub async fn query_shielded_balance< // hashtable creation any uglier if balances.is_empty() { display_line!( - IO, + context.io(), "No shielded {} balance found for any wallet key", &token_addr ); @@ -852,14 +831,19 @@ pub async fn query_shielded_balance< .get(&token) .map(|a| a.to_string()) .unwrap_or_else(|| token.to_string()); - display_line!(IO, "Shielded Token {}:", alias); + display_line!(context.io(), "Shielded Token {}:", alias); let formatted = format_denominated_amount( - client, + context, &token, token_balance.into(), ) .await; - display_line!(IO, " {}, owned by {}", formatted, fvk); + display_line!( + context.io(), + " {}, owned 
by {}", + formatted, + fvk + ); } } // Here the user wants to know the balance for a specific token across @@ -874,27 +858,30 @@ pub async fn query_shielded_balance< .as_ref(), ) .unwrap(); - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias); + let token_alias = context.wallet().await.lookup_alias(&token); + display_line!(context.io(), "Shielded Token {}:", token_alias); let mut found_any = false; - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias,); + let token_alias = context.wallet().await.lookup_alias(&token); + display_line!(context.io(), "Shielded Token {}:", token_alias,); for fvk in viewing_keys { // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; let balance = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) + context + .shielded_mut() + .await + .compute_shielded_balance( + context.client(), + &viewing_key, + ) .await .unwrap() .expect("context should contain viewing key") } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) + context + .shielded_mut() + .await + .compute_exchanged_balance(context, &viewing_key, epoch) .await .unwrap() .expect("context should contain viewing key") @@ -905,17 +892,22 @@ pub async fn query_shielded_balance< found_any = true; } let formatted = format_denominated_amount( - client, + context, address, (*val).into(), ) .await; - display_line!(IO, " {}, owned by {}", formatted, fvk); + display_line!( + context.io(), + " {}, owned by {}", + formatted, + fvk + ); } } if !found_any { display_line!( - IO, + context.io(), "No shielded {} balance found for any wallet key", token_alias, ); @@ -927,56 +919,48 @@ pub async fn query_shielded_balance< let viewing_key = ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; if no_conversions { - let balance = shielded - 
.compute_shielded_balance(client, &viewing_key) + let balance = context + .shielded_mut() + .await + .compute_shielded_balance(context.client(), &viewing_key) .await .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance_with_epoch::<_, IO>( - client, wallet, balance, - ) - .await; + print_decoded_balance_with_epoch(context, balance).await; } else { - let balance = shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) + let balance = context + .shielded_mut() + .await + .compute_exchanged_balance(context, &viewing_key, epoch) .await .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance::<_, IO>(client, wallet, balance, epoch) - .await; + print_decoded_balance(context, balance, epoch).await; } } } } -pub async fn print_decoded_balance< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn print_decoded_balance<'a>( + context: &impl Namada<'a>, decoded_balance: MaspAmount, epoch: Epoch, ) { if decoded_balance.is_empty() { - display_line!(IO, "No shielded balance found for given key"); + display_line!(context.io(), "No shielded balance found for given key"); } else { for ((_, token_addr), amount) in decoded_balance .iter() .filter(|((token_epoch, _), _)| *token_epoch == epoch) { display_line!( - IO, + context.io(), "{} : {}", - wallet.lookup_alias(token_addr), + context.wallet().await.lookup_alias(token_addr), format_denominated_amount( - client, + context, token_addr, (*amount).into() ) @@ -986,17 +970,16 @@ pub async fn print_decoded_balance< } } -pub async fn print_decoded_balance_with_epoch< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn print_decoded_balance_with_epoch<'a>( + context: &impl Namada<'a>, decoded_balance: MaspAmount, ) { - let tokens = 
wallet.get_addresses_with_vp_type(AddressVpType::Token); + let tokens = context + .wallet() + .await + .get_addresses_with_vp_type(AddressVpType::Token); if decoded_balance.is_empty() { - display_line!(IO, "No shielded balance found for given key"); + display_line!(context.io(), "No shielded balance found for given key"); } for ((epoch, token_addr), value) in decoded_balance.iter() { let asset_value = (*value).into(); @@ -1005,12 +988,11 @@ pub async fn print_decoded_balance_with_epoch< .map(|a| a.to_string()) .unwrap_or_else(|| token_addr.to_string()); display_line!( - IO, + context.io(), "{} | {} : {}", alias, epoch, - format_denominated_amount(client, token_addr, asset_value) - .await, + format_denominated_amount(context, token_addr, asset_value).await, ); } } @@ -1026,35 +1008,37 @@ pub async fn get_token_balance( .unwrap() } -pub async fn query_proposal_result< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_proposal_result<'a>( + context: &impl Namada<'a>, args: args::QueryProposalResult, ) { if args.proposal_id.is_some() { let proposal_id = args.proposal_id.expect("Proposal id should be defined."); let proposal = if let Some(proposal) = - query_proposal_by_id(client, proposal_id).await.unwrap() + query_proposal_by_id(context.client(), proposal_id) + .await + .unwrap() { proposal } else { - edisplay_line!(IO, "Proposal {} not found.", proposal_id); + edisplay_line!(context.io(), "Proposal {} not found.", proposal_id); return; }; - let is_author_steward = query_pgf_stewards(client) + let is_author_steward = query_pgf_stewards(context.client()) .await .iter() .any(|steward| steward.address.eq(&proposal.author)); let tally_type = proposal.get_tally_type(is_author_steward); - let total_voting_power = - get_total_staked_tokens(client, proposal.voting_end_epoch).await; + let total_voting_power = get_total_staked_tokens( + context.client(), + proposal.voting_end_epoch, + ) + .await; let votes = compute_proposal_votes( - 
client, + context.client(), proposal_id, proposal.voting_end_epoch, ) @@ -1063,8 +1047,8 @@ pub async fn query_proposal_result< let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); - display_line!(IO, "Proposal Id: {} ", proposal_id); - display_line!(IO, "{:4}{}", "", proposal_result); + display_line!(context.io(), "Proposal Id: {} ", proposal_id); + display_line!(context.io(), "{:4}{}", "", proposal_result); } else { let proposal_folder = args.proposal_folder.expect( "The argument --proposal-folder is required with --offline.", @@ -1085,11 +1069,13 @@ pub async fn query_proposal_result< serde_json::from_reader(proposal_file) .expect("file should be proper JSON"); - let author_account = - rpc::get_account_info(client, &proposal.proposal.author) - .await - .unwrap() - .expect("Account should exist."); + let author_account = rpc::get_account_info( + context.client(), + &proposal.proposal.author, + ) + .await + .unwrap() + .expect("Account should exist."); let proposal = proposal.validate( &author_account.public_keys_map, @@ -1100,12 +1086,15 @@ pub async fn query_proposal_result< if proposal.is_ok() { proposal.unwrap() } else { - edisplay_line!(IO, "The offline proposal is not valid."); + edisplay_line!( + context.io(), + "The offline proposal is not valid." + ); return; } } else { edisplay_line!( - IO, + context.io(), "Couldn't find a file name offline_proposal_*.json." 
); return; @@ -1121,15 +1110,14 @@ pub async fn query_proposal_result< }) .collect::>(); - let proposal_votes = compute_offline_proposal_votes::<_, IO>( - client, - &proposal, - votes.clone(), + let proposal_votes = + compute_offline_proposal_votes(context, &proposal, votes.clone()) + .await; + let total_voting_power = get_total_staked_tokens( + context.client(), + proposal.proposal.tally_epoch, ) .await; - let total_voting_power = - get_total_staked_tokens(client, proposal.proposal.tally_epoch) - .await; let proposal_result = compute_proposal_result( proposal_votes, @@ -1137,51 +1125,54 @@ pub async fn query_proposal_result< TallyType::TwoThird, ); - display_line!(IO, "Proposal offline: {}", proposal.proposal.hash()); - display_line!(IO, "Parsed {} votes.", votes.len()); - display_line!(IO, "{:4}{}", "", proposal_result); + display_line!( + context.io(), + "Proposal offline: {}", + proposal.proposal.hash() + ); + display_line!(context.io(), "Parsed {} votes.", votes.len()); + display_line!(context.io(), "{:4}{}", "", proposal_result); } } -pub async fn query_account< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_account<'a>( + context: &impl Namada<'a>, args: args::QueryAccount, ) { - let account = rpc::get_account_info(client, &args.owner).await.unwrap(); + let account = rpc::get_account_info(context.client(), &args.owner) + .await + .unwrap(); if let Some(account) = account { - display_line!(IO, "Address: {}", account.address); - display_line!(IO, "Threshold: {}", account.threshold); - display_line!(IO, "Public keys:"); + display_line!(context.io(), "Address: {}", account.address); + display_line!(context.io(), "Threshold: {}", account.threshold); + display_line!(context.io(), "Public keys:"); for (public_key, _) in account.public_keys_map.pk_to_idx { - display_line!(IO, "- {}", public_key); + display_line!(context.io(), "- {}", public_key); } } else { - display_line!(IO, "No account exists for {}", args.owner); + 
display_line!(context.io(), "No account exists for {}", args.owner); } } -pub async fn query_pgf( - client: &C, - _args: args::QueryPgf, -) { - let stewards = query_pgf_stewards(client).await; - let fundings = query_pgf_fundings(client).await; +pub async fn query_pgf<'a>(context: &impl Namada<'a>, _args: args::QueryPgf) { + let stewards = query_pgf_stewards(context.client()).await; + let fundings = query_pgf_fundings(context.client()).await; match stewards.is_empty() { true => { - display_line!(IO, "Pgf stewards: no stewards are currectly set.") + display_line!( + context.io(), + "Pgf stewards: no stewards are currectly set." + ) } false => { - display_line!(IO, "Pgf stewards:"); + display_line!(context.io(), "Pgf stewards:"); for steward in stewards { - display_line!(IO, "{:4}- {}", "", steward.address); - display_line!(IO, "{:4} Reward distribution:", ""); + display_line!(context.io(), "{:4}- {}", "", steward.address); + display_line!(context.io(), "{:4} Reward distribution:", ""); for (address, percentage) in steward.reward_distribution { display_line!( - IO, + context.io(), "{:6}- {} to {}", "", percentage, @@ -1194,13 +1185,16 @@ pub async fn query_pgf( match fundings.is_empty() { true => { - display_line!(IO, "Pgf fundings: no fundings are currently set.") + display_line!( + context.io(), + "Pgf fundings: no fundings are currently set." 
+ ) } false => { - display_line!(IO, "Pgf fundings:"); + display_line!(context.io(), "Pgf fundings:"); for funding in fundings { display_line!( - IO, + context.io(), "{:4}- {} for {}", "", funding.detail.target, @@ -1211,180 +1205,198 @@ pub async fn query_pgf( } } -pub async fn query_protocol_parameters< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_protocol_parameters<'a>( + context: &impl Namada<'a>, _args: args::QueryProtocolParameters, ) { - let governance_parameters = query_governance_parameters(client).await; - display_line!(IO, "Governance Parameters\n"); + let governance_parameters = + query_governance_parameters(context.client()).await; + display_line!(context.io(), "Governance Parameters\n"); display_line!( - IO, + context.io(), "{:4}Min. proposal fund: {}", "", governance_parameters.min_proposal_fund.to_string_native() ); display_line!( - IO, + context.io(), "{:4}Max. proposal code size: {}", "", governance_parameters.max_proposal_code_size ); display_line!( - IO, + context.io(), "{:4}Min. proposal voting period: {}", "", governance_parameters.min_proposal_voting_period ); display_line!( - IO, + context.io(), "{:4}Max. proposal period: {}", "", governance_parameters.max_proposal_period ); display_line!( - IO, + context.io(), "{:4}Max. proposal content size: {}", "", governance_parameters.max_proposal_content_size ); display_line!( - IO, + context.io(), "{:4}Min. 
proposal grace epochs: {}", "", governance_parameters.min_proposal_grace_epochs ); - let pgf_parameters = query_pgf_parameters(client).await; - display_line!(IO, "Public Goods Funding Parameters\n"); + let pgf_parameters = query_pgf_parameters(context.client()).await; + display_line!(context.io(), "Public Goods Funding Parameters\n"); display_line!( - IO, + context.io(), "{:4}Pgf inflation rate: {}", "", pgf_parameters.pgf_inflation_rate ); display_line!( - IO, + context.io(), "{:4}Steward inflation rate: {}", "", pgf_parameters.stewards_inflation_rate ); - display_line!(IO, "Protocol parameters"); + display_line!(context.io(), "Protocol parameters"); let key = param_storage::get_epoch_duration_storage_key(); - let epoch_duration = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); + let epoch_duration: EpochDuration = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be definied."); display_line!( - IO, + context.io(), "{:4}Min. epoch duration: {}", "", epoch_duration.min_duration ); display_line!( - IO, + context.io(), "{:4}Min. number of blocks: {}", "", epoch_duration.min_num_of_blocks ); let key = param_storage::get_max_expected_time_per_block_key(); - let max_block_duration = query_storage_value::(client, &key) + let max_block_duration: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Max. block duration: {}", "", max_block_duration); + display_line!( + context.io(), + "{:4}Max. 
block duration: {}", + "", + max_block_duration + ); let key = param_storage::get_tx_whitelist_storage_key(); - let vp_whitelist = query_storage_value::>(client, &key) + let vp_whitelist: Vec = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}VP whitelist: {:?}", "", vp_whitelist); + display_line!(context.io(), "{:4}VP whitelist: {:?}", "", vp_whitelist); let key = param_storage::get_tx_whitelist_storage_key(); - let tx_whitelist = query_storage_value::>(client, &key) + let tx_whitelist: Vec = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Transactions whitelist: {:?}", "", tx_whitelist); + display_line!( + context.io(), + "{:4}Transactions whitelist: {:?}", + "", + tx_whitelist + ); let key = param_storage::get_max_block_gas_key(); - let max_block_gas = query_storage_value::(client, &key) + let max_block_gas: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Max block gas: {:?}", "", max_block_gas); + display_line!(context.io(), "{:4}Max block gas: {:?}", "", max_block_gas); let key = param_storage::get_fee_unshielding_gas_limit_key(); - let fee_unshielding_gas_limit = query_storage_value::(client, &key) - .await - .expect("Parameter should be defined."); + let fee_unshielding_gas_limit: u64 = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be defined."); display_line!( - IO, + context.io(), "{:4}Fee unshielding gas limit: {:?}", "", fee_unshielding_gas_limit ); let key = param_storage::get_fee_unshielding_descriptions_limit_key(); - let fee_unshielding_descriptions_limit = - query_storage_value::(client, &key) + let fee_unshielding_descriptions_limit: u64 = + query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); display_line!( - IO, + context.io(), "{:4}Fee unshielding descriptions 
limit: {:?}", "", fee_unshielding_descriptions_limit ); let key = param_storage::get_gas_cost_key(); - let gas_cost_table = query_storage_value::< - C, - BTreeMap, - >(client, &key) - .await - .expect("Parameter should be defined."); - display_line!(IO, "{:4}Gas cost table:", ""); + let gas_cost_table: BTreeMap = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be defined."); + display_line!(context.io(), "{:4}Gas cost table:", ""); for (token, gas_cost) in gas_cost_table { - display_line!(IO, "{:8}{}: {:?}", "", token, gas_cost); + display_line!(context.io(), "{:8}{}: {:?}", "", token, gas_cost); } - display_line!(IO, "PoS parameters"); - let pos_params = query_pos_parameters(client).await; + display_line!(context.io(), "PoS parameters"); + let pos_params = query_pos_parameters(context.client()).await; display_line!( - IO, + context.io(), "{:4}Block proposer reward: {}", "", pos_params.block_proposer_reward ); display_line!( - IO, + context.io(), "{:4}Block vote reward: {}", "", pos_params.block_vote_reward ); display_line!( - IO, + context.io(), "{:4}Duplicate vote minimum slash rate: {}", "", pos_params.duplicate_vote_min_slash_rate ); display_line!( - IO, + context.io(), "{:4}Light client attack minimum slash rate: {}", "", pos_params.light_client_attack_min_slash_rate ); display_line!( - IO, + context.io(), "{:4}Max. 
validator slots: {}", "", pos_params.max_validator_slots ); - display_line!(IO, "{:4}Pipeline length: {}", "", pos_params.pipeline_len); - display_line!(IO, "{:4}Unbonding length: {}", "", pos_params.unbonding_len); display_line!( - IO, + context.io(), + "{:4}Pipeline length: {}", + "", + pos_params.pipeline_len + ); + display_line!( + context.io(), + "{:4}Unbonding length: {}", + "", + pos_params.unbonding_len + ); + display_line!( + context.io(), "{:4}Votes per token: {}", "", pos_params.tm_votes_per_token @@ -1443,16 +1455,14 @@ pub async fn query_pgf_parameters( unwrap_client_response::(RPC.vp().pgf().parameters(client).await) } -pub async fn query_and_print_unbonds< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_and_print_unbonds<'a>( + context: &impl Namada<'a>, source: &Address, validator: &Address, ) { - let unbonds = query_unbond_with_slashing(client, source, validator).await; - let current_epoch = query_epoch(client).await.unwrap(); + let unbonds = + query_unbond_with_slashing(context.client(), source, validator).await; + let current_epoch = query_epoch(context.client()).await.unwrap(); let mut total_withdrawable = token::Amount::default(); let mut not_yet_withdrawable = HashMap::::new(); @@ -1467,17 +1477,17 @@ pub async fn query_and_print_unbonds< } if total_withdrawable != token::Amount::default() { display_line!( - IO, + context.io(), "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - display_line!(IO, "Current epoch: {current_epoch}."); + display_line!(context.io(), "Current epoch: {current_epoch}."); } for (withdraw_epoch, amount) in not_yet_withdrawable { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native(), ); @@ -1501,12 +1511,11 @@ pub async fn query_withdrawable_tokens< } /// Query PoS bond(s) and unbond(s) -pub async fn query_bonds( - client: &C, - _wallet: &mut 
Wallet, +pub async fn query_bonds<'a>( + context: &impl Namada<'a>, args: args::QueryBonds, ) -> std::io::Result<()> { - let epoch = query_and_print_epoch::<_, IO>(client).await; + let epoch = query_and_print_epoch(context).await; let source = args.owner; let validator = args.validator; @@ -1514,10 +1523,14 @@ pub async fn query_bonds( let stdout = io::stdout(); let mut w = stdout.lock(); - let bonds_and_unbonds = - enriched_bonds_and_unbonds(client, epoch, &source, &validator) - .await - .unwrap(); + let bonds_and_unbonds = enriched_bonds_and_unbonds( + context.client(), + epoch, + &source, + &validator, + ) + .await + .unwrap(); for (bond_id, details) in &bonds_and_unbonds.data { let bond_type = if bond_id.source == bond_id.validator { @@ -1528,10 +1541,10 @@ pub async fn query_bonds( bond_id.source, bond_id.validator ) }; - display_line!(IO, &mut w; "{}:", bond_type)?; + display_line!(context.io(), &mut w; "{}:", bond_type)?; for bond in &details.data.bonds { display_line!( - IO, + context.io(), &mut w; " Remaining active bond from epoch {}: Δ {}", bond.start, @@ -1540,14 +1553,14 @@ pub async fn query_bonds( } if details.bonds_total != token::Amount::zero() { display_line!( - IO, + context.io(), &mut w; "Active (slashed) bonds total: {}", details.bonds_total_active().to_string_native() )?; } - display_line!(IO, &mut w; "Bonds total: {}", details.bonds_total.to_string_native())?; - display_line!(IO, &mut w; "")?; + display_line!(context.io(), &mut w; "Bonds total: {}", details.bonds_total.to_string_native())?; + display_line!(context.io(), &mut w; "")?; if !details.data.unbonds.is_empty() { let bond_type = if bond_id.source == bond_id.validator { @@ -1555,10 +1568,10 @@ pub async fn query_bonds( } else { format!("Unbonded delegations from {}", bond_id.source) }; - display_line!(IO, &mut w; "{}:", bond_type)?; + display_line!(context.io(), &mut w; "{}:", bond_type)?; for unbond in &details.data.unbonds { display_line!( - IO, + context.io(), &mut w; " Withdrawable 
from epoch {} (active from {}): Δ {}", unbond.withdraw, @@ -1567,30 +1580,30 @@ pub async fn query_bonds( )?; } display_line!( - IO, + context.io(), &mut w; "Unbonded total: {}", details.unbonds_total.to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "Withdrawable total: {}", details.total_withdrawable.to_string_native() )?; - display_line!(IO, &mut w; "")?; + display_line!(context.io(), &mut w; "")?; } if bonds_and_unbonds.bonds_total != bonds_and_unbonds.bonds_total_slashed { display_line!( - IO, + context.io(), &mut w; "All bonds total active: {}", bonds_and_unbonds.bonds_total_active().to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "All bonds total: {}", bonds_and_unbonds.bonds_total.to_string_native() @@ -1600,20 +1613,20 @@ pub async fn query_bonds( != bonds_and_unbonds.unbonds_total_slashed { display_line!( - IO, + context.io(), &mut w; "All unbonds total active: {}", bonds_and_unbonds.unbonds_total_active().to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "All unbonds total: {}", bonds_and_unbonds.unbonds_total.to_string_native() )?; display_line!( - IO, + context.io(), &mut w; "All unbonds total withdrawable: {}", bonds_and_unbonds.total_withdrawable.to_string_native() @@ -1622,51 +1635,55 @@ pub async fn query_bonds( } /// Query PoS bonded stake -pub async fn query_bonded_stake< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_bonded_stake<'a, N: Namada<'a>>( + context: &N, args: args::QueryBondedStake, ) { let epoch = match args.epoch { Some(epoch) => epoch, - None => query_and_print_epoch::<_, IO>(client).await, + None => query_and_print_epoch(context).await, }; match args.validator { Some(validator) => { let validator = validator; // Find bonded stake for the given validator - let stake = get_validator_stake(client, epoch, &validator).await; + let stake = + get_validator_stake(context.client(), epoch, &validator).await; match stake { Some(stake) 
=> { // TODO: show if it's in consensus set, below capacity, or // below threshold set display_line!( - IO, + context.io(), "Bonded stake of validator {validator}: {}", stake.to_string_native() ) } None => { - display_line!(IO, "No bonded stake found for {validator}"); + display_line!( + context.io(), + "No bonded stake found for {validator}" + ); } } } None => { - let consensus = - unwrap_client_response::>( + let consensus: BTreeSet = + unwrap_client_response::( RPC.vp() .pos() - .consensus_validator_set(client, &Some(epoch)) + .consensus_validator_set(context.client(), &Some(epoch)) .await, ); - let below_capacity = - unwrap_client_response::>( + let below_capacity: BTreeSet = + unwrap_client_response::( RPC.vp() .pos() - .below_capacity_validator_set(client, &Some(epoch)) + .below_capacity_validator_set( + context.client(), + &Some(epoch), + ) .await, ); @@ -1674,10 +1691,11 @@ pub async fn query_bonded_stake< let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Consensus validators:").unwrap(); + display_line!(context.io(), &mut w; "Consensus validators:") + .unwrap(); for val in consensus.into_iter().rev() { display_line!( - IO, + context.io(), &mut w; " {}: {}", val.address.encode(), @@ -1686,11 +1704,11 @@ pub async fn query_bonded_stake< .unwrap(); } if !below_capacity.is_empty() { - display_line!(IO, &mut w; "Below capacity validators:") + display_line!(context.io(), &mut w; "Below capacity validators:") .unwrap(); for val in below_capacity.into_iter().rev() { display_line!( - IO, + context.io(), &mut w; " {}: {}", val.address.encode(), @@ -1702,9 +1720,10 @@ pub async fn query_bonded_stake< } } - let total_staked_tokens = get_total_staked_tokens(client, epoch).await; + let total_staked_tokens = + get_total_staked_tokens(context.client(), epoch).await; display_line!( - IO, + context.io(), "Total bonded stake: {}", total_staked_tokens.to_string_native() ); @@ -1744,47 +1763,43 @@ pub async fn query_validator_state< } /// Query 
a validator's state information -pub async fn query_and_print_validator_state< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_and_print_validator_state<'a>( + context: &impl Namada<'a>, args: args::QueryValidatorState, ) { let validator = args.validator; let state: Option = - query_validator_state(client, &validator, args.epoch).await; + query_validator_state(context.client(), &validator, args.epoch).await; match state { Some(state) => match state { ValidatorState::Consensus => { display_line!( - IO, + context.io(), "Validator {validator} is in the consensus set" ) } ValidatorState::BelowCapacity => { display_line!( - IO, + context.io(), "Validator {validator} is in the below-capacity set" ) } ValidatorState::BelowThreshold => { display_line!( - IO, + context.io(), "Validator {validator} is in the below-threshold set" ) } ValidatorState::Inactive => { - display_line!(IO, "Validator {validator} is inactive") + display_line!(context.io(), "Validator {validator} is inactive") } ValidatorState::Jailed => { - display_line!(IO, "Validator {validator} is jailed") + display_line!(context.io(), "Validator {validator} is jailed") } }, None => display_line!( - IO, + context.io(), "Validator {validator} is either not a validator, or an epoch \ before the current epoch has been queried (and the validator \ state information is no longer stored)" @@ -1793,25 +1808,21 @@ pub async fn query_and_print_validator_state< } /// Query PoS validator's commission rate information -pub async fn query_and_print_commission_rate< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_and_print_commission_rate<'a>( + context: &impl Namada<'a>, args: args::QueryCommissionRate, ) { let validator = args.validator; let info: Option = - query_commission_rate(client, &validator, args.epoch).await; + query_commission_rate(context.client(), &validator, args.epoch).await; 
match info { Some(CommissionPair { commission_rate: rate, max_commission_change_per_epoch: change, }) => { display_line!( - IO, + context.io(), "Validator {} commission rate: {}, max change per epoch: {}", validator.encode(), rate, @@ -1820,7 +1831,7 @@ pub async fn query_and_print_commission_rate< } None => { display_line!( - IO, + context.io(), "Address {} is not a validator (did not find commission rate \ and max change)", validator.encode(), @@ -1830,28 +1841,27 @@ pub async fn query_and_print_commission_rate< } /// Query PoS slashes -pub async fn query_slashes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_slashes<'a, N: Namada<'a>>( + context: &N, args: args::QuerySlashes, ) { match args.validator { Some(validator) => { let validator = validator; // Find slashes for the given validator - let slashes: Vec = unwrap_client_response::>( - RPC.vp().pos().validator_slashes(client, &validator).await, + let slashes: Vec = unwrap_client_response::( + RPC.vp() + .pos() + .validator_slashes(context.client(), &validator) + .await, ); if !slashes.is_empty() { - display_line!(IO, "Processed slashes:"); + display_line!(context.io(), "Processed slashes:"); let stdout = io::stdout(); let mut w = stdout.lock(); for slash in slashes { display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type {}, rate \ {}", @@ -1864,7 +1874,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "No processed slashes found for {}", validator.encode() ) @@ -1874,20 +1884,26 @@ pub async fn query_slashes< let enqueued_slashes: HashMap< Address, BTreeMap>, - > = unwrap_client_response::< - C, - HashMap>>, - >(RPC.vp().pos().enqueued_slashes(client).await); + > = unwrap_client_response::( + RPC.vp().pos().enqueued_slashes(context.client()).await, + ); let enqueued_slashes = enqueued_slashes.get(&validator).cloned(); if let Some(enqueued) = enqueued_slashes { - 
display_line!(IO, "\nEnqueued slashes for future processing"); + display_line!( + context.io(), + "\nEnqueued slashes for future processing" + ); for (epoch, slashes) in enqueued { - display_line!(IO, "To be processed in epoch {}", epoch); + display_line!( + context.io(), + "To be processed in epoch {}", + epoch + ); for slash in slashes { let stdout = io::stdout(); let mut w = stdout.lock(); display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type {}", slash.epoch, slash.block_height, slash.r#type, @@ -1897,7 +1913,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "No enqueued slashes found for {}", validator.encode() ) @@ -1905,18 +1921,18 @@ pub async fn query_slashes< } None => { let all_slashes: HashMap> = - unwrap_client_response::>>( - RPC.vp().pos().slashes(client).await, + unwrap_client_response::( + RPC.vp().pos().slashes(context.client()).await, ); if !all_slashes.is_empty() { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, "Processed slashes:"); + display_line!(context.io(), "Processed slashes:"); for (validator, slashes) in all_slashes.into_iter() { for slash in slashes { display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, rate {}, \ type {}, validator {}", @@ -1930,7 +1946,7 @@ pub async fn query_slashes< } } } else { - display_line!(IO, "No processed slashes found") + display_line!(context.io(), "No processed slashes found") } // Find enqueued slashes to be processed in the future for the given @@ -1938,16 +1954,18 @@ pub async fn query_slashes< let enqueued_slashes: HashMap< Address, BTreeMap>, - > = unwrap_client_response::< - C, - HashMap>>, - >(RPC.vp().pos().enqueued_slashes(client).await); + > = unwrap_client_response::( + RPC.vp().pos().enqueued_slashes(context.client()).await, + ); if !enqueued_slashes.is_empty() { - display_line!(IO, "\nEnqueued slashes for future processing"); + display_line!( + context.io(), + 
"\nEnqueued slashes for future processing" + ); for (validator, slashes_by_epoch) in enqueued_slashes { for (epoch, slashes) in slashes_by_epoch { display_line!( - IO, + context.io(), "\nTo be processed in epoch {}", epoch ); @@ -1955,7 +1973,7 @@ pub async fn query_slashes< let stdout = io::stdout(); let mut w = stdout.lock(); display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type \ {}, validator {}", @@ -1970,7 +1988,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "\nNo enqueued slashes found for future processing" ) } @@ -1978,55 +1996,57 @@ pub async fn query_slashes< } } -pub async fn query_delegations< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_delegations<'a, N: Namada<'a>>( + context: &N, args: args::QueryDelegations, ) { let owner = args.owner; - let delegations = unwrap_client_response::>( - RPC.vp().pos().delegation_validators(client, &owner).await, + let delegations: HashSet
= unwrap_client_response::( + RPC.vp() + .pos() + .delegation_validators(context.client(), &owner) + .await, ); if delegations.is_empty() { - display_line!(IO, "No delegations found"); + display_line!(context.io(), "No delegations found"); } else { - display_line!(IO, "Found delegations to:"); + display_line!(context.io(), "Found delegations to:"); for delegation in delegations { - display_line!(IO, " {delegation}"); + display_line!(context.io(), " {delegation}"); } } } -pub async fn query_find_validator< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_find_validator<'a, N: Namada<'a>>( + context: &N, args: args::QueryFindValidator, ) { let args::QueryFindValidator { query: _, tm_addr } = args; if tm_addr.len() != 40 { edisplay_line!( - IO, + context.io(), "Expected 40 characters in Tendermint address, got {}", tm_addr.len() ); cli::safe_exit(1); } let tm_addr = tm_addr.to_ascii_uppercase(); - let validator = unwrap_client_response::( - RPC.vp().pos().validator_by_tm_addr(client, &tm_addr).await, + let validator = unwrap_client_response::( + RPC.vp() + .pos() + .validator_by_tm_addr(context.client(), &tm_addr) + .await, ); match validator { Some(address) => { - display_line!(IO, "Found validator address \"{address}\".") + display_line!( + context.io(), + "Found validator address \"{address}\"." + ) } None => { display_line!( - IO, + context.io(), "No validator with Tendermint address {tm_addr} found." ) } @@ -2034,18 +2054,17 @@ pub async fn query_find_validator< } /// Dry run a transaction -pub async fn dry_run_tx( - client: &C, +pub async fn dry_run_tx<'a, N: Namada<'a>>( + context: &N, tx_bytes: Vec, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { display_line!( - IO, + context.io(), "Dry-run result: {}", - rpc::dry_run_tx::<_, IO>(client, tx_bytes).await? + rpc::dry_run_tx(context, tx_bytes).await? 
); Ok(()) } @@ -2102,25 +2121,24 @@ pub async fn known_address( } /// Query for all conversions. -pub async fn query_conversions< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn query_conversions<'a>( + context: &impl Namada<'a>, args: args::QueryConversions, ) { // The chosen token type of the conversions let target_token = args.token; // To facilitate human readable token addresses - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let tokens = context + .wallet() + .await + .get_addresses_with_vp_type(AddressVpType::Token); let masp_addr = masp(); let key_prefix: Key = masp_addr.to_db_key().into(); let state_key = key_prefix .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) .unwrap(); - let conv_state = - query_storage_value::(client, &state_key) + let conv_state: ConversionState = + query_storage_value(context.client(), &state_key) .await .expect("Conversions should be defined"); // Track whether any non-sentinel conversions are found @@ -2139,7 +2157,7 @@ pub async fn query_conversions< conversions_found = true; // Print the asset to which the conversion applies display!( - IO, + context.io(), "{}[{}]: ", tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch, @@ -2152,7 +2170,7 @@ pub async fn query_conversions< let ((addr, _), epoch, _, _) = &conv_state.assets[asset_type]; // Now print out this component of the conversion display!( - IO, + context.io(), "{}{} {}[{}]", prefix, val, @@ -2163,11 +2181,11 @@ pub async fn query_conversions< prefix = " + "; } // Allowed conversions are always implicit equations - display_line!(IO, " = 0"); + display_line!(context.io(), " = 0"); } if !conversions_found { display_line!( - IO, + context.io(), "No conversions found satisfying specified criteria." 
); } @@ -2188,14 +2206,11 @@ pub async fn query_conversion( } /// Query a wasm code hash -pub async fn query_wasm_code_hash< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_wasm_code_hash<'a>( + context: &impl Namada<'a>, code_path: impl AsRef, ) -> Result { - rpc::query_wasm_code_hash(client, code_path).await + rpc::query_wasm_code_hash(context, code_path).await } /// Query a storage value and decode it with [`BorshDeserialize`]. @@ -2226,20 +2241,14 @@ pub async fn query_storage_value_bytes< /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. -pub async fn query_storage_prefix< - C: namada::ledger::queries::Client + Sync, - T, - IO: Io, ->( - client: &C, +pub async fn query_storage_prefix<'a, 'b, T>( + context: &'b impl Namada<'a>, key: &storage::Key, -) -> Option> +) -> Option> where T: BorshDeserialize, { - rpc::query_storage_prefix::<_, IO, _>(client, key) - .await - .unwrap() + rpc::query_storage_prefix(context, key).await.unwrap() } /// Query to check if the given storage key exists. @@ -2277,20 +2286,20 @@ pub async fn query_tx_response( /// Lookup the results of applying the specified transaction to the /// blockchain. -pub async fn query_result( - client: &C, +pub async fn query_result<'a>( + context: &impl Namada<'a>, args: args::QueryResult, ) { // First try looking up application event pertaining to given hash. let tx_response = query_tx_response( - client, + context.client(), namada::sdk::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; match tx_response { Ok(result) => { display_line!( - IO, + context.io(), "Transaction was applied with result: {}", serde_json::to_string_pretty(&result).unwrap() ) @@ -2298,19 +2307,19 @@ pub async fn query_result( Err(err1) => { // If this fails then instead look for an acceptance event. 
let tx_response = query_tx_response( - client, + context.client(), namada::sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; match tx_response { Ok(result) => display_line!( - IO, + context.io(), "Transaction was accepted with result: {}", serde_json::to_string_pretty(&result).unwrap() ), Err(err2) => { // Print the errors that caused the lookups to fail - edisplay_line!(IO, "{}\n{}", err1, err2); + edisplay_line!(context.io(), "{}\n{}", err1, err2); cli::safe_exit(1) } } @@ -2318,16 +2327,13 @@ pub async fn query_result( } } -pub async fn epoch_sleep( - client: &C, - _args: args::Query, -) { - let start_epoch = query_and_print_epoch::<_, IO>(client).await; +pub async fn epoch_sleep<'a>(context: &impl Namada<'a>, _args: args::Query) { + let start_epoch = query_and_print_epoch(context).await; loop { tokio::time::sleep(core::time::Duration::from_secs(1)).await; - let current_epoch = query_epoch(client).await.unwrap(); + let current_epoch = query_epoch(context.client()).await.unwrap(); if current_epoch > start_epoch { - display_line!(IO, "Reached epoch {}", current_epoch); + display_line!(context.io(), "Reached epoch {}", current_epoch); break; } } @@ -2427,11 +2433,8 @@ fn unwrap_client_response( }) } -pub async fn compute_offline_proposal_votes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn compute_offline_proposal_votes<'a>( + context: &impl Namada<'a>, proposal: &OfflineSignedProposal, votes: Vec, ) -> ProposalVotes { @@ -2444,11 +2447,11 @@ pub async fn compute_offline_proposal_votes< HashMap, > = HashMap::default(); for vote in votes { - let is_validator = is_validator(client, &vote.address).await; - let is_delegator = is_delegator(client, &vote.address).await; + let is_validator = is_validator(context.client(), &vote.address).await; + let is_delegator = is_delegator(context.client(), &vote.address).await; if is_validator { let validator_stake = get_validator_stake( - client, + context.client(), 
proposal.proposal.tally_epoch, &vote.address, ) @@ -2459,7 +2462,7 @@ pub async fn compute_offline_proposal_votes< .insert(vote.address.clone(), validator_stake); } else if is_delegator { let validators = get_delegators_delegation_at( - client, + context.client(), &vote.address.clone(), proposal.proposal.tally_epoch, ) @@ -2478,7 +2481,7 @@ pub async fn compute_offline_proposal_votes< } } else { display_line!( - IO, + context.io(), "Skipping vote, not a validator/delegator at epoch {}.", proposal.proposal.tally_epoch ); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c64fdf0044..22f0c1b1b4 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -6,14 +6,11 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; -use namada::ledger::pos; -use namada::sdk::rpc::{TxBroadcastData, TxResponse}; -use namada::sdk::wallet::{Wallet, WalletIo}; -use namada::ledger::{Namada, NamadaImpl}; +use namada::ledger::{pos, Namada}; use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; +use namada::sdk::rpc::{TxBroadcastData, TxResponse}; use namada::sdk::{error, signing, tx}; -use namada::tendermint_rpc::HttpClient; use namada::types::address::{Address, ImplicitAddress}; use namada::types::dec::Dec; use namada::types::io::Io; @@ -22,21 +19,18 @@ use namada::types::transaction::pos::InitValidator; use namada::{display_line, edisplay_line}; use super::rpc; -use crate::cli::{args, safe_exit, Context}; +use crate::cli::{args, safe_exit}; use crate::client::rpc::query_wasm_code_hash; use crate::client::tx::tx::ProcessTxResponse; use crate::config::TendermintMode; use crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::node::ledger::tendermint_node; -use crate::wallet::{ - gen_validator_keys, read_and_confirm_encryption_password, CliWalletUtils, -}; -use 
namada::types::io::StdIo; +use crate::wallet::{gen_validator_keys, read_and_confirm_encryption_password}; /// Wrapper around `signing::aux_signing_data` that stores the optional /// disposable address to the wallet pub async fn aux_signing_data<'a>( - context: &impl Namada<'a, WalletUtils = CliWalletUtils>, + context: &impl Namada<'a>, args: &args::Tx, owner: Option
, default_signer: Option
, @@ -47,14 +41,14 @@ pub async fn aux_signing_data<'a>( if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { // Store the generated signing key to wallet in case of need - crate::wallet::save(*context.wallet().await).map_err(|_| { + context.wallet().await.save().map_err(|_| { error::Error::Other( "Failed to save disposable address to wallet".to_string(), ) })?; } else { display_line!( - StdIo, + context.io(), "Transaction dry run. The disposable address will not be \ saved to wallet." ) @@ -103,24 +97,21 @@ pub async fn submit_reveal_aux<'a>( Ok(()) } -pub async fn submit_custom( - client: &C, - ctx: &mut Context, +pub async fn submit_custom<'a, N: Namada<'a>>( + namada: &N, args: args::TxCustom, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&namada, args.tx.clone(), &args.owner).await?; + submit_reveal_aux(namada, args.tx.clone(), &args.owner).await?; - let (mut tx, signing_data, _epoch) = args.build(&namada).await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -129,22 +120,19 @@ where Ok(()) } -pub async fn submit_update_account( - client: &C, - ctx: &mut Context, +pub async fn submit_update_account<'a, N: Namada<'a>>( + namada: &N, args: args::TxUpdateAccount, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx, signing_data, _epoch) = 
args.build(&namada).await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -153,23 +141,20 @@ where Ok(()) } -pub async fn submit_init_account( - client: &C, - ctx: &mut Context, +pub async fn submit_init_account<'a, N: Namada<'a>>( + namada: &N, args: args::TxInitAccount, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _epoch) = - tx::build_init_account(&namada, &args).await?; + tx::build_init_account(namada, &args).await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -178,9 +163,9 @@ where Ok(()) } -pub async fn submit_init_validator( - client: &C, - mut ctx: Context, +pub async fn submit_init_validator<'a>( + namada: &impl Namada<'a>, + config: &mut crate::config::Config, args::TxInitValidator { tx: tx_args, scheme, @@ -196,15 +181,12 @@ pub async fn submit_init_validator( unsafe_dont_encrypt, tx_code_path: _, }: args::TxInitValidator, -) -> Result<(), error::Error> -where - C: namada::ledger::queries::Client + Sync, -{ +) -> Result<(), error::Error> { let tx_args = args::Tx { chain_id: tx_args .clone() .chain_id - .or_else(|| Some(ctx.config.ledger.chain_id.clone())), + .or_else(|| Some(config.ledger.chain_id.clone())), ..tx_args.clone() }; let alias = tx_args @@ -229,19 +211,23 @@ where let 
eth_hot_key_alias = format!("{}-eth-hot-key", alias); let eth_cold_key_alias = format!("{}-eth-cold-key", alias); + let mut wallet = namada.wallet_mut().await; let consensus_key = consensus_key .map(|key| match key { common::SecretKey::Ed25519(_) => key, common::SecretKey::Secp256k1(_) => { - edisplay_line!(IO, "Consensus key can only be ed25519"); + edisplay_line!( + namada.io(), + "Consensus key can only be ed25519" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating consensus key..."); + display_line!(namada.io(), "Generating consensus key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that TM only allows ed25519 for consensus key SchemeType::Ed25519, @@ -259,15 +245,18 @@ where .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - edisplay_line!(IO, "Eth cold key can only be secp256k1"); + edisplay_line!( + namada.io(), + "Eth cold key can only be secp256k1" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating Eth cold key..."); + display_line!(namada.io(), "Generating Eth cold key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, @@ -286,15 +275,18 @@ where .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - edisplay_line!(IO, "Eth hot key can only be secp256k1"); + edisplay_line!( + namada.io(), + "Eth hot key can only be secp256k1" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating Eth hot key..."); + display_line!(namada.io(), "Generating Eth hot key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, @@ -308,13 +300,15 @@ where .1 .ref_to() }); + // To avoid 
wallet deadlocks in following operations + drop(wallet); if protocol_key.is_none() { - display_line!(IO, "Generating protocol signing key..."); + display_line!(namada.io(), "Generating protocol signing key..."); } // Generate the validator keys let validator_keys = gen_validator_keys( - &mut ctx.wallet, + *namada.wallet_mut().await, Some(eth_hot_pk.clone()), protocol_key, scheme, @@ -327,17 +321,15 @@ where .expect("DKG sessions keys should have been created") .public(); - let validator_vp_code_hash = query_wasm_code_hash::( - client, - validator_vp_code_path.to_str().unwrap(), - ) - .await - .unwrap(); + let validator_vp_code_hash = + query_wasm_code_hash(namada, validator_vp_code_path.to_str().unwrap()) + .await + .unwrap(); // Validate the commission rate data if commission_rate > Dec::one() || commission_rate < Dec::zero() { edisplay_line!( - IO, + namada.io(), "The validator commission rate must not exceed 1.0 or 100%, and \ it must be 0 or positive" ); @@ -349,7 +341,7 @@ where || max_commission_rate_change < Dec::zero() { edisplay_line!( - IO, + namada.io(), "The validator maximum change in commission rate per epoch must \ not exceed 1.0 or 100%" ); @@ -358,7 +350,7 @@ where } } let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, args::TX_INIT_VALIDATOR_WASM) + query_wasm_code_hash(namada, args::TX_INIT_VALIDATOR_WASM) .await .unwrap(); @@ -384,11 +376,10 @@ where tx.add_code_from_hash(tx_code_hash).add_data(data); - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let signing_data = aux_signing_data(&namada, &tx_args, None, None).await?; + let signing_data = aux_signing_data(namada, &tx_args, None, None).await?; tx::prepare_tx( - &namada, + namada, &tx_args, &mut tx, signing_data.fee_payer.clone(), @@ -396,10 +387,10 @@ where ) .await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if tx_args.dump_tx { - tx::dump_tx::(&tx_args, tx); + tx::dump_tx(namada.io(), 
&tx_args, tx); } else { namada.sign(&mut tx, &tx_args, signing_data).await?; @@ -411,29 +402,37 @@ where // There should be 1 account for the validator itself [validator_address] => { if let Some(alias) = - ctx.wallet.find_alias(validator_address) + namada.wallet().await.find_alias(validator_address) { (alias.clone(), validator_address.clone()) } else { edisplay_line!( - IO, + namada.io(), "Expected one account to be created" ); safe_exit(1) } } _ => { - edisplay_line!(IO, "Expected one account to be created"); + edisplay_line!( + namada.io(), + "Expected one account to be created" + ); safe_exit(1) } }; // add validator address and keys to the wallet - ctx.wallet + namada + .wallet_mut() + .await .add_validator_data(validator_address, validator_keys); - crate::wallet::save(&ctx.wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + namada + .wallet_mut() + .await + .save() + .unwrap_or_else(|err| edisplay_line!(namada.io(), "{}", err)); - let tendermint_home = ctx.config.ledger.cometbft_dir(); + let tendermint_home = config.ledger.cometbft_dir(); tendermint_node::write_validator_key( &tendermint_home, &consensus_key, @@ -442,51 +441,55 @@ where // Write Namada config stuff or figure out how to do the above // tendermint_node things two epochs in the future!!! 
- ctx.config.ledger.shell.tendermint_mode = TendermintMode::Validator; - ctx.config + config.ledger.shell.tendermint_mode = TendermintMode::Validator; + config .write( - &ctx.config.ledger.shell.base_dir, - &ctx.config.ledger.chain_id, + &config.ledger.shell.base_dir, + &config.ledger.chain_id, true, ) .unwrap(); let key = pos::params_key(); - let pos_params = - rpc::query_storage_value::(client, &key) + let pos_params: PosParams = + rpc::query_storage_value(namada.client(), &key) .await .expect("Pos parameter should be defined."); - display_line!(IO, ""); + display_line!(namada.io(), ""); display_line!( - IO, + namada.io(), "The validator's addresses and keys were stored in the wallet:" ); display_line!( - IO, + namada.io(), " Validator address \"{}\"", validator_address_alias ); display_line!( - IO, + namada.io(), " Validator account key \"{}\"", validator_key_alias ); - display_line!(IO, " Consensus key \"{}\"", consensus_key_alias); display_line!( - IO, + namada.io(), + " Consensus key \"{}\"", + consensus_key_alias + ); + display_line!( + namada.io(), "The ledger node has been setup to use this validator's \ address and consensus key." ); display_line!( - IO, + namada.io(), "Your validator will be active in {} epochs. Be sure to \ restart your node for the changes to take effect!", pos_params.pipeline_len ); } else { display_line!( - IO, + namada.io(), "Transaction dry run. No addresses have been saved." 
); } @@ -494,35 +497,30 @@ where Ok(()) } -pub async fn submit_transfer( - client: &C, - mut ctx: Context, +pub async fn submit_transfer<'a>( + namada: &impl Namada<'a>, args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { - let namada = - NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux( - &namada, + namada, args.tx.clone(), &args.source.effective_address(), ) .await?; let (mut tx, signing_data, tx_epoch) = - args.clone().build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.clone().build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); break; } else { namada.sign(&mut tx, &args.tx, signing_data).await?; let result = namada.submit(tx, &args.tx).await?; - let submission_epoch = - rpc::query_and_print_epoch::<_, IO>(client).await; + let submission_epoch = rpc::query_and_print_epoch(namada).await; match result { ProcessTxResponse::Applied(resp) if @@ -534,7 +532,7 @@ pub async fn submit_transfer( tx_epoch.unwrap() != submission_epoch => { // Then we probably straddled an epoch boundary. Let's retry... - edisplay_line!(IO, + edisplay_line!(namada.io(), "MASP transaction rejected and this may be due to the \ epoch changing. 
Attempting to resubmit transaction.", ); @@ -550,22 +548,19 @@ pub async fn submit_transfer( Ok(()) } -pub async fn submit_ibc_transfer( - client: &C, - mut ctx: Context, +pub async fn submit_ibc_transfer<'a, N: Namada<'a>>( + namada: &N, args: args::TxIbcTransfer, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&namada, args.tx.clone(), &args.source).await?; - let (mut tx, signing_data, _epoch) = args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + submit_reveal_aux(namada, args.tx.clone(), &args.source).await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -574,18 +569,16 @@ where Ok(()) } -pub async fn submit_init_proposal( - client: &C, - mut ctx: Context, +pub async fn submit_init_proposal<'a, N: Namada<'a>>( + namada: &N, args: args::InitProposal, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; - let governance_parameters = rpc::query_governance_parameters(client).await; - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); + let current_epoch = rpc::query_and_print_epoch(namada).await; + let governance_parameters = + rpc::query_governance_parameters(namada.client()).await; let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) @@ -599,7 +592,7 @@ where let default_signer = 
Some(proposal.author.clone()); let signing_data = aux_signing_data( - &namada, + namada, &args.tx, Some(proposal.author.clone()), default_signer, @@ -618,7 +611,11 @@ where ) })?; - display_line!(IO, "Proposal serialized to: {}", output_file_path); + display_line!( + namada.io(), + "Proposal serialized to: {}", + output_file_path + ); return Ok(()); } else if args.is_pgf_funding { let proposal = @@ -631,10 +628,10 @@ where .validate(&governance_parameters, current_epoch, args.tx.force) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) .await?; - tx::build_pgf_funding_proposal(&namada, &args, proposal).await? + tx::build_pgf_funding_proposal(namada, &args, proposal).await? } else if args.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( args.proposal_data.as_ref(), @@ -643,8 +640,8 @@ where error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; let author_balance = rpc::get_token_balance( - client, - &ctx.native_token, + namada.client(), + &namada.native_token().await, &proposal.proposal.author, ) .await; @@ -657,18 +654,18 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) .await?; - tx::build_pgf_stewards_proposal(&namada, &args, proposal).await? + tx::build_pgf_stewards_proposal(namada, &args, proposal).await? 
} else { let proposal = DefaultProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; let author_balane = rpc::get_token_balance( - client, - &ctx.native_token, + namada.client(), + &namada.native_token().await, &proposal.proposal.author, ) .await; @@ -681,15 +678,15 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - submit_reveal_aux(&namada, args.tx.clone(), &proposal.proposal.author) + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) .await?; - tx::build_default_proposal(&namada, &args, proposal).await? + tx::build_default_proposal(namada, &args, proposal).await? }; - signing::generate_test_vector(&namada, &tx_builder).await?; + signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx_builder); + tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { namada.sign(&mut tx_builder, &args.tx, signing_data).await?; namada.submit(tx_builder, &args.tx).await?; @@ -698,21 +695,18 @@ where Ok(()) } -pub async fn submit_vote_proposal( - client: &C, - mut ctx: Context, +pub async fn submit_vote_proposal<'a, N: Namada<'a>>( + namada: &N, args: args::VoteProposal, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let default_signer = Some(args.voter.clone()); let signing_data = aux_signing_data( - &namada, + namada, &args.tx, Some(args.voter.clone()), default_signer.clone(), @@ -733,7 +727,7 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let delegations = rpc::get_delegators_delegation_at( - client, + namada.client(), &args.voter, proposal.proposal.tally_epoch, ) @@ -757,15 +751,19 @@ where .serialize(args.tx.output_folder) 
.expect("Should be able to serialize the offline proposal"); - display_line!(IO, "Proposal vote serialized to: {}", output_file_path); + display_line!( + namada.io(), + "Proposal vote serialized to: {}", + output_file_path + ); return Ok(()); } else { - args.build(&namada).await? + args.build(namada).await? }; - signing::generate_test_vector(&namada, &tx_builder).await?; + signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx_builder); + tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { namada.sign(&mut tx_builder, &args.tx, signing_data).await?; namada.submit(tx_builder, &args.tx).await?; @@ -774,9 +772,8 @@ where Ok(()) } -pub async fn sign_tx( - client: &C, - ctx: &mut Context, +pub async fn sign_tx<'a, N: Namada<'a>>( + namada: &N, args::SignTx { tx: tx_args, tx_data, @@ -784,36 +781,31 @@ pub async fn sign_tx( }: args::SignTx, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { transaction } else { - edisplay_line!(IO, "Couldn't decode the transaction."); + edisplay_line!(namada.io(), "Couldn't decode the transaction."); safe_exit(1) }; - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_signer = Some(owner.clone()); - let signing_data = aux_signing_data( - &namada, - &tx_args, - Some(owner.clone()), - default_signer, - ) - .await?; + let signing_data = + aux_signing_data(namada, &tx_args, Some(owner.clone()), default_signer) + .await?; + let mut wallet = namada.wallet_mut().await; let secret_keys = &signing_data .public_keys .iter() .filter_map(|public_key| { if let Ok(secret_key) = - signing::find_key_by_pk(&mut ctx.wallet, &tx_args, public_key) + signing::find_key_by_pk(&mut wallet, &tx_args, public_key) { Some(secret_key) } else { edisplay_line!( - IO, + namada.io(), "Couldn't find the secret key for 
{}. Skipping signature \ generation.", public_key @@ -851,7 +843,7 @@ where ) .expect("Signature should be deserializable."); display_line!( - IO, + namada.io(), "Signature for {} serialized at {}", signature.pubkey, output_path.display() @@ -861,40 +853,34 @@ where Ok(()) } -pub async fn submit_reveal_pk( - client: &C, - ctx: &mut Context, +pub async fn submit_reveal_pk<'a, N: Namada<'a>>( + namada: &N, args: args::RevealPk, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - submit_reveal_aux(&namada, args.tx, &(&args.public_key).into()).await?; + submit_reveal_aux(namada, args.tx, &(&args.public_key).into()).await?; Ok(()) } -pub async fn submit_bond( - client: &C, - ctx: &mut Context, +pub async fn submit_bond<'a, N: Namada<'a>>( + namada: &N, args: args::Bond, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let default_address = args.source.clone().unwrap_or(args.validator.clone()); - submit_reveal_aux(&namada, args.tx.clone(), &default_address).await?; + submit_reveal_aux(namada, args.tx.clone(), &default_address).await?; let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; @@ -904,50 +890,43 @@ where Ok(()) } -pub async fn submit_unbond( - client: &C, - ctx: &mut Context, +pub async fn submit_unbond<'a, N: Namada<'a>>( + namada: &N, args: args::Unbond, ) -> Result<(), error::Error> where - C: 
namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = - args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; - tx::query_unbonds::<_, IO>(client, args.clone(), latest_withdrawal_pre) - .await?; + tx::query_unbonds(namada, args.clone(), latest_withdrawal_pre).await?; } Ok(()) } -pub async fn submit_withdraw( - client: &C, - mut ctx: Context, +pub async fn submit_withdraw<'a, N: Namada<'a>>( + namada: &N, args: args::Withdraw, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; @@ -957,21 +936,19 @@ where Ok(()) } -pub async fn submit_validator_commission_change( - client: &C, - mut ctx: Context, +pub async fn submit_validator_commission_change<'a, N: Namada<'a>>( + namada: &N, args: args::CommissionRateChange, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = 
- args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; @@ -981,24 +958,19 @@ where Ok(()) } -pub async fn submit_unjail_validator< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - mut ctx: Context, +pub async fn submit_unjail_validator<'a, N: Namada<'a>>( + namada: &N, args: args::TxUnjailValidator, ) -> Result<(), error::Error> where - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; @@ -1008,26 +980,20 @@ where Ok(()) } -pub async fn submit_update_steward_commission< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - mut ctx: Context, +pub async fn submit_update_steward_commission<'a, N: Namada<'a>>( + namada: &N, args: args::UpdateStewardCommission, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(&namada).await?; + args.build(namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, 
&args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -1036,22 +1002,19 @@ where Ok(()) } -pub async fn submit_resign_steward( - client: &C, - mut ctx: Context, +pub async fn submit_resign_steward<'a, N: Namada<'a>>( + namada: &N, args: args::ResignSteward, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let namada = NamadaImpl::new(client, &mut ctx.wallet, &mut ctx.shielded); - let (mut tx, signing_data, _epoch) = args.build(&namada).await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector(&namada, &tx).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { namada.sign(&mut tx, &args.tx, signing_data).await?; namada.submit(tx, &args.tx).await?; @@ -1061,24 +1024,23 @@ where } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( - wallet: &mut Wallet, +pub async fn save_initialized_accounts<'a>( + namada: &impl Namada<'a>, args: &args::Tx, initialized_accounts: Vec
, ) { - tx::save_initialized_accounts::(wallet, args, initialized_accounts) - .await + tx::save_initialized_accounts(namada, args, initialized_accounts).await } /// Broadcast a transaction to be included in the blockchain and checks that /// the tx has been successfully included into the mempool of a validator /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( - rpc_cli: &HttpClient, +pub async fn broadcast_tx<'a>( + namada: &impl Namada<'a>, to_broadcast: &TxBroadcastData, ) -> Result { - tx::broadcast_tx::<_, IO>(rpc_cli, to_broadcast).await + tx::broadcast_tx(namada, to_broadcast).await } /// Broadcast a transaction to be included in the blockchain. @@ -1089,9 +1051,9 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. /// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( - client: &HttpClient, +pub async fn submit_tx<'a>( + namada: &impl Namada<'a>, to_broadcast: TxBroadcastData, ) -> Result { - tx::submit_tx::<_, IO>(client, to_broadcast).await + tx::submit_tx(namada, to_broadcast).await } diff --git a/apps/src/lib/node/ledger/shell/testing/client.rs b/apps/src/lib/node/ledger/shell/testing/client.rs index 9ebc825f54..504bdc7e5b 100644 --- a/apps/src/lib/node/ledger/shell/testing/client.rs +++ b/apps/src/lib/node/ledger/shell/testing/client.rs @@ -50,6 +50,7 @@ pub fn run( rt.block_on(CliApi::::handle_client_command( Some(node), cmd, + &TestingIo, )) } Bin::Wallet => { @@ -60,7 +61,7 @@ pub fn run( let cmd = cmds::NamadaWallet::parse(&matches) .expect("Could not parse wallet command"); - CliApi::::handle_wallet_command(cmd, ctx) + CliApi::::handle_wallet_command(cmd, ctx, &TestingIo) } Bin::Relayer => { args.insert(0, "relayer"); @@ -85,6 +86,7 @@ pub fn run( rt.block_on(CliApi::::handle_relayer_command( Some(node), cmd, + &TestingIo, )) } } @@ -96,7 +98,7 @@ impl<'a> CliClient for &'a MockNode 
{ unreachable!("MockNode should always be instantiated at test start.") } - async fn wait_until_node_is_synced(&self) -> Halt<()> { + async fn wait_until_node_is_synced(&self, _io: &impl Io) -> Halt<()> { ControlFlow::Continue(()) } } diff --git a/apps/src/lib/node/ledger/shell/testing/utils.rs b/apps/src/lib/node/ledger/shell/testing/utils.rs index bfcb7f50ab..451e20c2df 100644 --- a/apps/src/lib/node/ledger/shell/testing/utils.rs +++ b/apps/src/lib/node/ledger/shell/testing/utils.rs @@ -74,13 +74,13 @@ pub struct TestingIo; #[async_trait::async_trait(?Send)] impl Io for TestingIo { - fn print(output: impl AsRef) { + fn print(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); testout.append(output.as_ref().as_bytes().to_vec()); print!("{}", output.as_ref()); } - fn println(output: impl AsRef) { + fn println(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); let mut bytes = output.as_ref().as_bytes().to_vec(); bytes.extend_from_slice("\n".as_bytes()); @@ -89,22 +89,24 @@ impl Io for TestingIo { } fn write( + &self, _: W, output: impl AsRef, ) -> std::io::Result<()> { - Self::print(output); + self.print(output); Ok(()) } fn writeln( + &self, _: W, output: impl AsRef, ) -> std::io::Result<()> { - Self::println(output); + self.println(output); Ok(()) } - fn eprintln(output: impl AsRef) { + fn eprintln(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); let mut bytes = output.as_ref().as_bytes().to_vec(); bytes.extend_from_slice("\n".as_bytes()); @@ -112,11 +114,11 @@ impl Io for TestingIo { eprintln!("{}", output.as_ref()); } - async fn read() -> tokio::io::Result { + async fn read(&self) -> tokio::io::Result { read_aux(&*TESTIN).await } - async fn prompt(question: impl AsRef) -> String { + async fn prompt(&self, question: impl AsRef) -> String { prompt_aux(&*TESTIN, tokio::io::stdout(), question.as_ref()).await } } diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 
cca43a43d6..61c49fe580 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -10,6 +10,8 @@ use std::{env, fs}; use namada::bip39::{Language, Mnemonic}; pub use namada::sdk::wallet::alias::Alias; +use namada::sdk::wallet::fs::FsWalletStorage; +use namada::sdk::wallet::store::Store; use namada::sdk::wallet::{ AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, Wallet, WalletIo, @@ -20,8 +22,6 @@ use namada::types::key::*; use rand_core::OsRng; pub use store::wallet_file; use zeroize::Zeroizing; -use namada::sdk::wallet::store::Store; -use namada::sdk::wallet::fs::FsWalletStorage; use crate::cli; use crate::config::genesis::genesis_config::GenesisConfig; diff --git a/benches/lib.rs b/benches/lib.rs index 3aacaef90f..b5036d5f66 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -69,7 +69,6 @@ use namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada::ledger::storage_api::StorageRead; -use namada::sdk::wallet::Wallet; use namada::ledger::NamadaImpl; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; @@ -77,6 +76,7 @@ use namada::sdk::args::InputAmount; use namada::sdk::masp::{ self, ShieldedContext, ShieldedTransfer, ShieldedUtils, }; +use namada::sdk::wallet::Wallet; use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; @@ -689,13 +689,12 @@ impl Default for BenchShieldedCtx { fn default() -> Self { let mut shell = BenchShell::default(); - let mut ctx = - Context::new::(namada_apps::cli::args::Global { - chain_id: None, - base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), - wasm_dir: Some(WASM_DIR.into()), - }) - .unwrap(); + let mut ctx = Context::new::(namada_apps::cli::args::Global { + chain_id: None, + base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), + wasm_dir: Some(WASM_DIR.into()), + }) + .unwrap(); // Generate spending key for Albert and Bertha 
ctx.wallet.gen_spending_key( @@ -811,8 +810,12 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let namada = - NamadaImpl::new(&self.shell, &mut self.wallet, &mut self.shielded); + let namada = NamadaImpl::new( + &self.shell, + &mut self.wallet, + &mut self.shielded, + &StdIo, + ); let shielded = async_runtime .block_on( ShieldedContext::::gen_shielded_transfer( diff --git a/shared/src/ledger/eth_bridge.rs b/shared/src/ledger/eth_bridge.rs index a73f5efd77..66ec22a63b 100644 --- a/shared/src/ledger/eth_bridge.rs +++ b/shared/src/ledger/eth_bridge.rs @@ -102,6 +102,7 @@ pub struct BlockOnEthSync { /// Block until Ethereum finishes synchronizing. pub async fn block_on_eth_sync( client: &C, + io: &IO, args: BlockOnEthSync, ) -> Halt<()> where @@ -111,7 +112,7 @@ where deadline, delta_sleep, } = args; - display_line!(IO, "Attempting to synchronize with the Ethereum network"); + display_line!(io, "Attempting to synchronize with the Ethereum network"); Sleep { strategy: LinearBackoff { delta: delta_sleep }, } @@ -128,11 +129,11 @@ where .await .try_halt(|_| { edisplay_line!( - IO, + io, "Timed out while waiting for Ethereum to synchronize" ); })?; - display_line!(IO, "The Ethereum node is up to date"); + display_line!(io, "The Ethereum node is up to date"); control_flow::proceed(()) } @@ -140,6 +141,7 @@ where /// not, perform `action`. pub async fn eth_sync_or( client: &C, + io: &IO, mut action: F, ) -> Halt> where @@ -151,7 +153,7 @@ where .map(|status| status.is_synchronized()) .try_halt(|err| { edisplay_line!( - IO, + io, "An error occurred while fetching the Ethereum \ synchronization status: {err}" ); @@ -165,11 +167,11 @@ where /// Check if Ethereum has finished synchronizing. In case it has /// not, end execution. 
-pub async fn eth_sync_or_exit(client: &C) -> Halt<()> +pub async fn eth_sync_or_exit(client: &C, io: &IO) -> Halt<()> where C: Middleware, { - eth_sync_or::<_, _, _, IO>(client, || { + eth_sync_or(client, io, || { tracing::error!("The Ethereum node has not finished synchronizing"); }) .await? diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index 14002b53d3..b4c633ca78 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -19,12 +19,12 @@ use crate::ledger::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, RPC, }; -use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; use crate::ledger::signing::aux_signing_data; use crate::ledger::tx::prepare_tx; use crate::ledger::{args, Namada, SigningTxData}; use crate::proto::Tx; use crate::sdk::error::Error; +use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; use crate::types::address::Address; use crate::types::control_flow::time::{Duration, Instant}; use crate::types::control_flow::{ @@ -40,7 +40,6 @@ use crate::types::token::{Amount, DenominatedAmount}; use crate::types::voting_power::FractionalVotingPower; use crate::{display, display_line}; - /// Craft a transaction that adds a transfer to the Ethereum bridge pool. pub async fn build_bridge_pool_tx<'a>( context: &impl Namada<'a>, @@ -67,7 +66,7 @@ pub async fn build_bridge_pool_tx<'a>( .await?; let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let DenominatedAmount { amount, .. } = validate_amount( - context.client(), + context, amount, &wrapped_erc20s::token(&asset), tx_args.force, @@ -76,19 +75,14 @@ pub async fn build_bridge_pool_tx<'a>( .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; let DenominatedAmount { amount: fee_amount, .. 
- } = validate_amount( - context.client(), - fee_amount, - &fee_token, - tx_args.force, - ) - .await - .map_err(|e| { - Error::Other(format!( - "Failed to validate Bridge pool fee amount. {}", - e - )) - })?; + } = validate_amount(context, fee_amount, &fee_token, tx_args.force) + .await + .map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool fee amount. {}", + e + )) + })?; let transfer = PendingTransfer { transfer: TransferToEthereum { asset, @@ -109,7 +103,7 @@ pub async fn build_bridge_pool_tx<'a>( }; let tx_code_hash = - query_wasm_code_hash(context.client(), code_path.to_str().unwrap()) + query_wasm_code_hash(context, code_path.to_str().unwrap()) .await .unwrap(); @@ -140,10 +134,10 @@ struct BridgePoolResponse { /// Query the contents of the Ethereum bridge pool. /// Prints out a json payload. -pub async fn query_bridge_pool(client: &C) -where - C: Client + Sync, -{ +pub async fn query_bridge_pool<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) { let response: Vec = RPC .shell() .eth_bridge() @@ -155,24 +149,22 @@ where .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - display_line!(IO, "Bridge pool is empty."); + display_line!(io, "Bridge pool is empty."); return; } let contents = BridgePoolResponse { bridge_pool_contents: pool_contents, }; - display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!(io, "{}", serde_json::to_string_pretty(&contents).unwrap()); } /// Query the contents of the Ethereum bridge pool that /// is covered by the latest signed root. /// Prints out a json payload. 
-pub async fn query_signed_bridge_pool( - client: &C, -) -> Halt> -where - C: Client + Sync, -{ +pub async fn query_signed_bridge_pool<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Halt> { let response: Vec = RPC .shell() .eth_bridge() @@ -184,13 +176,13 @@ where .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - display_line!(IO, "Bridge pool is empty."); + display_line!(io, "Bridge pool is empty."); return control_flow::halt(); } let contents = BridgePoolResponse { bridge_pool_contents: pool_contents.clone(), }; - display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!(io, "{}", serde_json::to_string_pretty(&contents).unwrap()); control_flow::proceed(pool_contents) } @@ -199,28 +191,26 @@ where /// backing each `TransferToEthereum` event. /// /// Prints a json payload. -pub async fn query_relay_progress(client: &C) -where - C: Client + Sync, -{ +pub async fn query_relay_progress<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) { let resp = RPC .shell() .eth_bridge() .transfer_to_ethereum_progress(client) .await .unwrap(); - display_line!(IO, "{}", serde_json::to_string_pretty(&resp).unwrap()); + display_line!(io, "{}", serde_json::to_string_pretty(&resp).unwrap()); } /// Internal methdod to construct a proof that a set of transfers are in the /// bridge pool. 
-async fn construct_bridge_pool_proof( - client: &C, +async fn construct_bridge_pool_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: GenBridgePoolProofReq<'_, '_>, -) -> Halt -where - C: Client + Sync, -{ +) -> Halt { let in_progress = RPC .shell() .eth_bridge() @@ -245,19 +235,19 @@ where let warning = warning.bold(); let warning = warning.blink(); display_line!( - IO, + io, "{warning}: The following hashes correspond to transfers that \ have surpassed the security threshold in Namada, therefore have \ likely been relayed to Ethereum, but do not yet have a quorum of \ validator signatures behind them in Namada; thus they are still \ in the Bridge pool:\n{warnings:?}", ); - display!(IO, "\nDo you wish to proceed? (y/n): "); - IO::flush(); + display!(io, "\nDo you wish to proceed? (y/n): "); + io.flush(); loop { - let resp = IO::read().await.try_halt(|e| { + let resp = io.read().await.try_halt(|e| { display_line!( - IO, + io, "Encountered error reading from STDIN: {e:?}" ); })?; @@ -265,8 +255,8 @@ where "y" => break, "n" => return control_flow::halt(), _ => { - display!(IO, "Expected 'y' or 'n'. Please try again: "); - IO::flush(); + display!(io, "Expected 'y' or 'n'. Please try again: "); + io.flush(); } } } @@ -280,7 +270,7 @@ where .await; response.map(|response| response.data).try_halt(|e| { - display_line!(IO, "Encountered error constructing proof:\n{:?}", e); + display_line!(io, "Encountered error constructing proof:\n{:?}", e); }) } @@ -296,18 +286,17 @@ struct BridgePoolProofResponse { /// Construct a merkle proof of a batch of transfers in /// the bridge pool and return it to the user (as opposed /// to relaying it to ethereum). 
-pub async fn construct_proof( - client: &C, +pub async fn construct_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::BridgePoolProof, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Halt<()> { let GenBridgePoolProofRsp { abi_encoded_args, appendices, - } = construct_bridge_pool_proof::<_, IO>( + } = construct_bridge_pool_proof( client, + io, GenBridgePoolProofReq { transfers: args.transfers.as_slice().into(), relayer: Cow::Borrowed(&args.relayer), @@ -336,26 +325,27 @@ where .unwrap_or_default(), abi_encoded_args, }; - display_line!(IO, "{}", serde_json::to_string(&resp).unwrap()); + display_line!(io, "{}", serde_json::to_string(&resp).unwrap()); control_flow::proceed(()) } /// Relay a validator set update, signed off for a given epoch. -pub async fn relay_bridge_pool_proof( +pub async fn relay_bridge_pool_proof<'a, E>( eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl Io, args: args::RelayBridgePoolProof, ) -> Halt<()> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, { let _signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync::<_, IO>( + block_on_eth_sync( &*eth_client, + io, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), @@ -363,13 +353,14 @@ where ) .await?; } else { - eth_sync_or_exit::<_, IO>(&*eth_client).await?; + eth_sync_or_exit(&*eth_client, io).await?; } let GenBridgePoolProofRsp { abi_encoded_args, .. 
- } = construct_bridge_pool_proof::<_, IO>( - nam_client, + } = construct_bridge_pool_proof( + client, + io, GenBridgePoolProofReq { transfers: Cow::Owned(args.transfers), relayer: Cow::Owned(args.relayer), @@ -377,32 +368,28 @@ where }, ) .await?; - let bridge = match RPC - .shell() - .eth_bridge() - .read_bridge_contract(nam_client) - .await - { - Ok(address) => Bridge::new(address.address, eth_client), - Err(err_msg) => { - let error = "Error".on_red(); - let error = error.bold(); - let error = error.blink(); - display_line!( - IO, - "{error}: Failed to retrieve the Ethereum Bridge smart \ - contract address from storage with \ - reason:\n{err_msg}\n\nPerhaps the Ethereum bridge is not \ - active.", - ); - return control_flow::halt(); - } - }; + let bridge = + match RPC.shell().eth_bridge().read_bridge_contract(client).await { + Ok(address) => Bridge::new(address.address, eth_client), + Err(err_msg) => { + let error = "Error".on_red(); + let error = error.bold(); + let error = error.blink(); + display_line!( + io, + "{error}: Failed to retrieve the Ethereum Bridge smart \ + contract address from storage with \ + reason:\n{err_msg}\n\nPerhaps the Ethereum bridge is not \ + active.", + ); + return control_flow::halt(); + } + }; let (validator_set, signatures, bp_proof): TransferToErcArgs = AbiDecode::decode(&abi_encoded_args).try_halt(|error| { display_line!( - IO, + io, "Unable to decode the generated proof: {:?}", error ); @@ -419,7 +406,7 @@ where let error = error.bold(); let error = error.blink(); display_line!( - IO, + io, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. A \ relay of the former one has already happened, but a proof \ @@ -433,7 +420,7 @@ where let error = error.bold(); let error = error.blink(); display_line!( - IO, + io, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. 
\ Somehow, Namada's nonce is ahead of the contract's nonce!", @@ -461,7 +448,7 @@ where .await .unwrap(); - display_line!(IO, "{transf_result:?}"); + display_line!(io, "{transf_result:?}"); control_flow::proceed(()) } @@ -560,19 +547,16 @@ mod recommendations { /// Recommend the most economical batch of transfers to relay based /// on a conversion rate estimates from NAM to ETH and gas usage /// heuristics. - pub async fn recommend_batch( - client: &C, + pub async fn recommend_batch<'a>( + context: &impl Namada<'a>, args: args::RecommendBatch, - ) -> Halt<()> - where - C: Client + Sync, - { + ) -> Halt<()> { // get transfers that can already been relayed but are awaiting a quorum // of backing votes. let in_progress = RPC .shell() .eth_bridge() - .transfer_to_ethereum_progress(client) + .transfer_to_ethereum_progress(context.client()) .await .unwrap() .into_keys() @@ -585,7 +569,7 @@ mod recommendations { <(BridgePoolRootProof, BlockHeight)>::try_from_slice( &RPC.shell() .storage_value( - client, + context.client(), None, None, false, @@ -594,36 +578,48 @@ mod recommendations { .await .try_halt(|err| { edisplay_line!( - IO, + context.io(), "Failed to query Bridge pool proof: {err}" ); })? .data, ) .try_halt(|err| { - edisplay_line!(IO, "Failed to decode Bridge pool proof: {err}"); + edisplay_line!( + context.io(), + "Failed to decode Bridge pool proof: {err}" + ); })?; // get the latest bridge pool nonce let latest_bp_nonce = EthUint::try_from_slice( &RPC.shell() - .storage_value(client, None, None, false, &get_nonce_key()) + .storage_value( + context.client(), + None, + None, + false, + &get_nonce_key(), + ) .await .try_halt(|err| { edisplay_line!( - IO, + context.io(), "Failed to query Bridge pool nonce: {err}" ); })? 
.data, ) .try_halt(|err| { - edisplay_line!(IO, "Failed to decode Bridge pool nonce: {err}"); + edisplay_line!( + context.io(), + "Failed to decode Bridge pool nonce: {err}" + ); })?; if latest_bp_nonce != bp_root.data.1 { edisplay_line!( - IO, + context.io(), "The signed Bridge pool nonce is not up to date, repeat this \ query at a later time" ); @@ -635,7 +631,7 @@ mod recommendations { let voting_powers = RPC .shell() .eth_bridge() - .voting_powers_at_height(client, &height) + .voting_powers_at_height(context.client(), &height) .await .unwrap(); let valset_size = Uint::from_u64(voting_powers.len() as u64); @@ -647,17 +643,19 @@ mod recommendations { + valset_fee() * valset_size; // we don't recommend transfers that have already been relayed - let eligible = generate_eligible::( + let eligible = generate_eligible( + context.io(), &args.conversion_table, &in_progress, - query_signed_bridge_pool::<_, IO>(client).await?, + query_signed_bridge_pool(context.client(), context.io()).await?, )?; let max_gas = args.max_gas.map(Uint::from_u64).unwrap_or(uint::MAX_VALUE); let max_cost = args.gas.map(I256::from).unwrap_or_default(); - generate_recommendations::( + generate_recommendations( + context.io(), eligible, &args.conversion_table, validator_gas, @@ -671,22 +669,28 @@ mod recommendations { net_profit, bridge_pool_gas_fees, }| { - display_line!(IO, "Recommended batch: {transfer_hashes:#?}"); display_line!( - IO, + context.io(), + "Recommended batch: {transfer_hashes:#?}" + ); + display_line!( + context.io(), "Estimated Ethereum transaction gas (in gwei): \ {ethereum_gas_fees}", ); display_line!( - IO, + context.io(), "Estimated net profit (in gwei): {net_profit}" ); - display_line!(IO, "Total fees: {bridge_pool_gas_fees:#?}"); + display_line!( + context.io(), + "Total fees: {bridge_pool_gas_fees:#?}" + ); }, ) .unwrap_or_else(|| { display_line!( - IO, + context.io(), "Unable to find a recommendation satisfying the input \ parameters." 
); @@ -731,6 +735,7 @@ mod recommendations { /// Generate eligible recommendations. fn generate_eligible( + io: &IO, conversion_table: &HashMap, in_progress: &BTreeSet, signed_pool: HashMap, @@ -747,7 +752,7 @@ mod recommendations { .and_then(|entry| match entry.conversion_rate { r if r == 0.0f64 => { edisplay_line!( - IO, + io, "{}: Ignoring null conversion rate", pending.gas_fee.token, ); @@ -755,7 +760,7 @@ mod recommendations { } r if r < 0.0f64 => { edisplay_line!( - IO, + io, "{}: Ignoring negative conversion rate: {r:.1}", pending.gas_fee.token, ); @@ -763,7 +768,7 @@ mod recommendations { } r if r > 1e9 => { edisplay_line!( - IO, + io, "{}: Ignoring high conversion rate: {r:.1} > \ 10^9", pending.gas_fee.token, @@ -814,6 +819,7 @@ mod recommendations { /// Generates the actual recommendation from restrictions given by the /// input parameters. fn generate_recommendations( + io: &IO, contents: Vec, conversion_table: &HashMap, validator_gas: Uint, @@ -882,7 +888,7 @@ mod recommendations { }) } else { display_line!( - IO, + io, "Unable to find a recommendation satisfying the input \ parameters." 
); @@ -1021,12 +1027,9 @@ mod recommendations { signed_pool: &mut signed_pool, expected_eligible: &mut expected, }); - let eligible = generate_eligible::( - &table, - &in_progress, - signed_pool, - ) - .proceed(); + let eligible = + generate_eligible(&StdIo, &table, &in_progress, signed_pool) + .proceed(); assert_eq!(eligible, expected); eligible } @@ -1116,7 +1119,8 @@ mod recommendations { let profitable = vec![transfer(100_000); 17]; let hash = profitable[0].keccak256().to_string(); let expected = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(profitable), &Default::default(), Uint::from_u64(800_000), @@ -1135,7 +1139,8 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); transfers.push(transfer(0)); let expected: Vec<_> = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(800_000), @@ -1153,7 +1158,8 @@ mod recommendations { let transfers = vec![transfer(75_000); 4]; let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 2]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(50_000), @@ -1175,7 +1181,8 @@ mod recommendations { .map(|t| t.keccak256().to_string()) .take(5) .collect(); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1194,7 +1201,8 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 4]; transfers.extend([transfer(17_500), transfer(17_500)]); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, 
process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1210,7 +1218,8 @@ mod recommendations { #[test] fn test_wholly_infeasible() { let transfers = vec![transfer(75_000); 4]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(300_000), @@ -1291,7 +1300,8 @@ mod recommendations { const VALIDATOR_GAS_FEE: Uint = Uint::from_u64(100_000); - let recommended_batch = generate_recommendations::( + let recommended_batch = generate_recommendations( + &StdIo, eligible, &conversion_table, // gas spent by validator signature checks diff --git a/shared/src/ledger/eth_bridge/validator_set.rs b/shared/src/ledger/eth_bridge/validator_set.rs index be99e130f8..90f043fe18 100644 --- a/shared/src/ledger/eth_bridge/validator_set.rs +++ b/shared/src/ledger/eth_bridge/validator_set.rs @@ -26,7 +26,7 @@ use crate::types::control_flow::{ self, install_shutdown_signal, Halt, TryHalt, }; use crate::types::ethereum_events::EthAddress; -use crate::types::io::{Io, StdIo}; +use crate::types::io::Io; use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs; use crate::{display_line, edisplay_line}; @@ -268,12 +268,11 @@ impl From> for RelayResult { /// Query an ABI encoding of the validator set to be installed /// at the given epoch, and its associated proof. 
-pub async fn query_validator_set_update_proof( - client: &C, +pub async fn query_validator_set_update_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::ValidatorSetProof, -) where - C: Client + Sync, -{ +) { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -287,17 +286,15 @@ pub async fn query_validator_set_update_proof( .await .unwrap(); - display_line!(IO, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); + display_line!(io, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); } /// Query an ABI encoding of the Bridge validator set at a given epoch. -pub async fn query_bridge_validator_set( - client: &C, +pub async fn query_bridge_validator_set<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::BridgeValidatorSet, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Halt<()> { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -313,18 +310,16 @@ where tracing::error!(%err, "Failed to fetch Bridge validator set"); })?; - display_validator_set::(args); + display_validator_set(io, args); control_flow::proceed(()) } /// Query an ABI encoding of the Governance validator set at a given epoch. -pub async fn query_governnace_validator_set( - client: &C, +pub async fn query_governnace_validator_set<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::GovernanceValidatorSet, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Halt<()> { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -340,12 +335,12 @@ where tracing::error!(%err, "Failed to fetch Governance validator set"); })?; - display_validator_set::(args); + display_validator_set(io, args); control_flow::proceed(()) } /// Display the given [`ValidatorSetArgs`]. 
-fn display_validator_set(args: ValidatorSetArgs) { +fn display_validator_set(io: &IO, args: ValidatorSetArgs) { use serde::Serialize; #[derive(Serialize)] @@ -373,28 +368,29 @@ fn display_validator_set(args: ValidatorSetArgs) { }; display_line!( - IO, + io, "{}", serde_json::to_string_pretty(&validator_set).unwrap() ); } /// Relay a validator set update, signed off for a given epoch. -pub async fn relay_validator_set_update( +pub async fn relay_validator_set_update<'a, E>( eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl Io, args: args::ValidatorSetUpdateRelay, ) -> Halt<()> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, { let mut signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync::<_, IO>( + block_on_eth_sync( &*eth_client, + io, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), @@ -402,14 +398,15 @@ where ) .await?; } else { - eth_sync_or_exit::<_, IO>(&*eth_client).await?; + eth_sync_or_exit(&*eth_client, io).await?; } if args.daemon { relay_validator_set_update_daemon( args, eth_client, - nam_client, + client, + io, &mut signal_receiver, ) .await @@ -417,11 +414,11 @@ where relay_validator_set_update_once::( &args, eth_client, - nam_client, + client, |relay_result| match relay_result { RelayResult::BridgeCallError(reason) => { edisplay_line!( - IO, + io, "Calling Bridge failed due to: {reason}" ); } @@ -432,27 +429,27 @@ where Ordering::Greater => "too far ahead of", }; edisplay_line!( - IO, + io, "Argument nonce <{argument}> is {whence} contract \ nonce <{contract}>" ); } RelayResult::NoReceipt => { edisplay_line!( - IO, + io, "No transfer receipt received from the Ethereum node" ); } RelayResult::Receipt { receipt } => { if receipt.is_successful() { display_line!( - IO, + io, "Ethereum transfer succeeded: {:?}", receipt ); } else { display_line!( - IO, + io, "Ethereum transfer failed: 
{:?}", receipt ); @@ -465,14 +462,14 @@ where } } -async fn relay_validator_set_update_daemon( +async fn relay_validator_set_update_daemon<'a, E, F>( mut args: args::ValidatorSetUpdateRelay, eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl Io, shutdown_receiver: &mut Option, ) -> Halt<()> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, F: Future + Unpin, @@ -513,9 +510,7 @@ where time::sleep(sleep_for).await; let is_synchronizing = - eth_sync_or::<_, _, _, StdIo>(&*eth_client, || ()) - .await - .is_break(); + eth_sync_or(&*eth_client, io, || ()).await.is_break(); if is_synchronizing { tracing::debug!("The Ethereum node is synchronizing"); last_call_succeeded = false; @@ -525,7 +520,7 @@ where // we could be racing against governance updates, // so it is best to always fetch the latest Bridge // contract address - let bridge = get_bridge_contract(nam_client, Arc::clone(ð_client)) + let bridge = get_bridge_contract(client, Arc::clone(ð_client)) .await .try_halt(|err| { // only care about displaying errors, @@ -544,7 +539,7 @@ where }); let shell = RPC.shell(); - let nam_current_epoch_fut = shell.epoch(nam_client).map(|result| { + let nam_current_epoch_fut = shell.epoch(client).map(|result| { result .map_err(|err| { tracing::error!( @@ -596,7 +591,7 @@ where let result = relay_validator_set_update_once::( &args, Arc::clone(ð_client), - nam_client, + client, |transf_result| { let Some(receipt) = transf_result else { tracing::warn!("No transfer receipt received from the Ethereum node"); diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index a8159834a5..5536f46b63 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -16,40 +16,39 @@ pub mod vp_host_fns; use std::path::PathBuf; use std::str::FromStr; -use std::sync::Arc; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; +use namada_core::types::dec::Dec; +use 
namada_core::types::ethereum_events::EthAddress; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use crate::sdk::wallet::{Wallet, WalletIo, WalletStorage}; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::proto::Tx; +use crate::sdk::args::{self, InputAmount, SdkTypes}; use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; -use crate::types::masp::{TransferSource, TransferTarget}; -use crate::types::address::Address; -use crate::sdk::args::{self, InputAmount}; -use crate::sdk::args::SdkTypes; +use crate::sdk::signing::{self, SigningTxData}; use crate::sdk::tx::{ - TX_TRANSFER_WASM, TX_REVEAL_PK, TX_BOND_WASM, TX_UNBOND_WASM, TX_IBC_WASM, - TX_INIT_PROPOSAL, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, VP_USER_WASM, - TX_CHANGE_COMMISSION_WASM, TX_INIT_VALIDATOR_WASM, TX_UNJAIL_VALIDATOR_WASM, - TX_WITHDRAW_WASM, TX_BRIDGE_POOL_WASM, TX_RESIGN_STEWARD, - TX_UPDATE_STEWARD_COMMISSION, self, + self, ProcessTxResponse, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, + TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_PROPOSAL, + TX_INIT_VALIDATOR_WASM, TX_RESIGN_STEWARD, TX_REVEAL_PK, TX_TRANSFER_WASM, + TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, + VP_USER_WASM, }; -use crate::types::transaction::GasLimit; -use crate::sdk::signing::{SigningTxData, self}; -use crate::proto::Tx; +use crate::sdk::wallet::{Wallet, WalletIo, WalletStorage}; +use crate::types::address::Address; +use crate::types::io::Io; use crate::types::key::*; +use crate::types::masp::{TransferSource, TransferTarget}; use crate::types::token; -use crate::sdk::tx::ProcessTxResponse; -use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use 
crate::types::transaction::GasLimit; #[async_trait::async_trait(?Send)] /// An interface for high-level interaction with the Namada SDK -pub trait Namada<'a> { +pub trait Namada<'a>: Sized { /// A client with async request dispatcher method type Client: 'a + crate::ledger::queries::Client + Sync; /// Captures the interactive parts of the wallet's functioning @@ -57,38 +56,37 @@ pub trait Namada<'a> { /// Abstracts platform specific details away from the logic of shielded pool /// operations. type ShieldedUtils: 'a + ShieldedUtils; + /// Captures the input/output streams used by this object + type Io: 'a + Io; /// Obtain the client for communicating with the ledger fn client(&self) -> &'a Self::Client; - /// Obtain read lock on the wallet + /// Obtain the input/output handle for this context + fn io(&self) -> &'a Self::Io; + + /// Obtain read guard on the wallet async fn wallet( &self, ) -> RwLockReadGuard<&'a mut Wallet>; - /// Obtain write lock on the wallet + /// Obtain write guard on the wallet async fn wallet_mut( &self, ) -> RwLockWriteGuard<&'a mut Wallet>; - /// Obtain read lock on the shielded context + /// Obtain read guard on the shielded context async fn shielded( &self, ) -> RwLockReadGuard<&'a mut ShieldedContext>; - /// Obtain write lock on the shielded context + /// Obtain write guard on the shielded context async fn shielded_mut( &self, ) -> RwLockWriteGuard<&'a mut ShieldedContext>; /// Return the native token - async fn native_token(&self) -> Address { - self.wallet() - .await - .find_address(args::NAM) - .expect("NAM not in wallet") - .clone() - } + async fn native_token(&self) -> Address; /// Make a tx builder using no arguments async fn tx_builder(&self) -> args::Tx { @@ -387,47 +385,52 @@ pub trait Namada<'a> { tx: Tx, args: &args::Tx, ) -> crate::sdk::error::Result { - tx::process_tx(self.client(), *self.wallet_mut().await, args, tx).await + tx::process_tx(self, args, tx).await } } /// Provides convenience methods for common Namada 
interactions -pub struct NamadaImpl<'a, C, U, V> +pub struct NamadaImpl<'a, C, U, V, I> where C: crate::ledger::queries::Client + Sync, U: WalletIo, V: ShieldedUtils, + I: Io, { /// Used to send and receive messages from the ledger pub client: &'a C, /// Stores the addresses and keys required for ledger interactions - pub wallet: Arc>>, + pub wallet: RwLock<&'a mut Wallet>, /// Stores the current state of the shielded pool - pub shielded: Arc>>, + pub shielded: RwLock<&'a mut ShieldedContext>, + /// Captures the input/output streams used by this object + pub io: &'a I, /// The default builder for a Tx prototype: args::Tx, } -impl<'a, C, U, V> NamadaImpl<'a, C, U, V> +/// The Namada token +pub const NAM: &str = "atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5"; + +impl<'a, C, U, V, I> NamadaImpl<'a, C, U, V, I> where C: crate::ledger::queries::Client + Sync, U: WalletIo, V: ShieldedUtils, + I: Io, { /// Construct a new Namada context pub fn new( client: &'a C, wallet: &'a mut Wallet, shielded: &'a mut ShieldedContext, + io: &'a I, ) -> Self { - let fee_token = wallet - .find_address(args::NAM) - .expect("NAM not in wallet") - .clone(); Self { client, - wallet: Arc::new(RwLock::new(wallet)), - shielded: Arc::new(RwLock::new(shielded)), + wallet: RwLock::new(wallet), + shielded: RwLock::new(shielded), + io, prototype: args::Tx { dry_run: false, dry_run_wrapper: false, @@ -440,7 +443,7 @@ where wallet_alias_force: false, fee_amount: None, wrapper_fee_payer: None, - fee_token, + fee_token: Address::from_str(NAM).unwrap(), fee_unshield: None, gas_limit: GasLimit::from(20_000), expiration: None, @@ -457,13 +460,15 @@ where } #[async_trait::async_trait(?Send)] -impl<'a, C, U, V> Namada<'a> for NamadaImpl<'a, C, U, V> +impl<'a, C, U, V, I> Namada<'a> for NamadaImpl<'a, C, U, V, I> where C: crate::ledger::queries::Client + Sync, U: WalletIo + WalletStorage, V: ShieldedUtils, + I: Io, { type Client = C; + type Io = I; type ShieldedUtils = 
V; type WalletUtils = U; @@ -472,6 +477,14 @@ where self.prototype.clone() } + async fn native_token(&self) -> Address { + Address::from_str(NAM).unwrap() + } + + fn io(&self) -> &'a Self::Io { + self.io + } + fn client(&self) -> &'a Self::Client { self.client } @@ -502,11 +515,12 @@ where } /// Allow the prototypical Tx builder to be modified -impl<'a, C, U, V> args::TxBuilder for NamadaImpl<'a, C, U, V> +impl<'a, C, U, V, I> args::TxBuilder for NamadaImpl<'a, C, U, V, I> where C: crate::ledger::queries::Client + Sync, U: WalletIo, V: ShieldedUtils, + I: Io, { fn tx(self, func: F) -> Self where diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 91e00dee68..0853dc251b 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -17,6 +17,8 @@ use zeroize::Zeroizing; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::ledger::eth_bridge::bridge_pool; use crate::ledger::Namada; +use crate::sdk::signing::SigningTxData; +use crate::sdk::{rpc, tx}; use crate::types::address::Address; use crate::types::keccak::KeccakHash; use crate::types::key::{common, SchemeType}; @@ -24,11 +26,6 @@ use crate::types::masp::MaspValue; use crate::types::storage::Epoch; use crate::types::transaction::GasLimit; use crate::types::{storage, token}; -use crate::sdk::signing::SigningTxData; -use crate::sdk::{tx, rpc}; - -/// The Namada token -pub const NAM: &str = "NAM"; /// [`Duration`](StdDuration) wrapper that provides a /// method to parse a value from a string. @@ -510,30 +507,30 @@ impl InitProposal { rpc::query_governance_parameters(context.client()).await; if self.is_pgf_funding { - let proposal = - PgfFundingProposal::try_from(self.proposal_data.as_ref()) - .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( - e.to_string(), - ) - })? 
- .validate(&governance_parameters, current_epoch, self.tx.force) - .map_err(|e| crate::sdk::error::TxError::InvalidProposal(e.to_string()))?; + let proposal = PgfFundingProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })? + .validate(&governance_parameters, current_epoch, self.tx.force) + .map_err(|e| { + crate::sdk::error::TxError::InvalidProposal(e.to_string()) + })?; tx::build_pgf_funding_proposal(context, self, proposal).await } else if self.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( self.proposal_data.as_ref(), ) - .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) - })?; - let nam_address = context - .wallet() - .await - .find_address(NAM) - .expect("NAM not in wallet") - .clone(); + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })?; + let nam_address = context.native_token().await; let author_balance = rpc::get_token_balance( context.client(), &nam_address, @@ -553,16 +550,15 @@ impl InitProposal { tx::build_pgf_stewards_proposal(context, self, proposal).await } else { - let proposal = DefaultProposal::try_from(self.proposal_data.as_ref()) - .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) - })?; - let nam_address = context - .wallet() - .await - .find_address(NAM) - .expect("NAM not in wallet") - .clone(); + let proposal = DefaultProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })?; + let nam_address = context.native_token().await; let author_balance = rpc::get_token_balance( context.client(), &nam_address, @@ -1358,7 +1354,7 @@ impl TxUnjailValidator { } /// Path to the TX WASM code file - pub fn tc_code_path(self, tx_code_path: PathBuf) -> Self { + pub fn 
tx_code_path(self, tx_code_path: PathBuf) -> Self { Self { tx_code_path, ..self diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 35c4f59956..96ab27fbd5 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -58,18 +58,18 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; -use crate::sdk::args::InputAmount; use crate::ledger::queries::Client; -use crate::sdk::rpc::{query_conversion, query_storage_value}; -use crate::sdk::tx::decode_component; use crate::ledger::Namada; use crate::proto::Tx; +use crate::sdk::args::InputAmount; use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; +use crate::sdk::rpc::{query_conversion, query_storage_value}; +use crate::sdk::tx::decode_component; use crate::sdk::{args, rpc}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::address::{masp, Address}; -use crate::types::io::{Io, StdIo}; +use crate::types::io::Io; use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token; @@ -1030,18 +1030,19 @@ impl ShieldedContext { /// context and express that value in terms of the currently timestamped /// asset types. If the key is not in the context, then we do not know the /// balance and hence we return None. - pub async fn compute_exchanged_balance( + pub async fn compute_exchanged_balance<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, vk: &ViewingKey, target_epoch: Epoch, ) -> Result, Error> { // First get the unexchanged balance - if let Some(balance) = self.compute_shielded_balance(client, vk).await? + if let Some(balance) = + self.compute_shielded_balance(context.client(), vk).await? 
{ let exchanged_amount = self .compute_exchanged_amount( - client, + context, balance, target_epoch, BTreeMap::new(), @@ -1050,7 +1051,8 @@ impl ShieldedContext { .0; // And then exchange balance into current asset types Ok(Some( - self.decode_all_amounts(client, exchanged_amount).await, + self.decode_all_amounts(context.client(), exchanged_amount) + .await, )) } else { Ok(None) @@ -1063,9 +1065,9 @@ impl ShieldedContext { /// the trace amount that could not be converted is moved from input to /// output. #[allow(clippy::too_many_arguments)] - async fn apply_conversion( + async fn apply_conversion<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, conv: AllowedConversion, asset_type: (Epoch, Address, MaspDenom), value: i128, @@ -1085,7 +1087,7 @@ impl ShieldedContext { let threshold = -conv[&masp_asset]; if threshold == 0 { edisplay_line!( - StdIo, + context.io(), "Asset threshold of selected conversion for asset type {} is \ 0, this is a bug, please report it.", masp_asset @@ -1104,7 +1106,7 @@ impl ShieldedContext { *usage += required; // Apply the conversions to input and move the trace amount to output *input += self - .decode_all_amounts(client, conv.clone() * required) + .decode_all_amounts(context.client(), conv.clone() * required) .await - trace.clone(); *output += trace; @@ -1115,9 +1117,9 @@ impl ShieldedContext { /// note of the conversions that were used. Note that this function does /// not assume that allowed conversions from the ledger are expressed in /// terms of the latest asset types. 
- pub async fn compute_exchanged_amount( + pub async fn compute_exchanged_amount<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, mut input: MaspAmount, target_epoch: Epoch, mut conversions: Conversions, @@ -1139,13 +1141,13 @@ impl ShieldedContext { let denom_value = denom.denominate_i128(&value); self.query_allowed_conversion( - client, + context.client(), target_asset_type, &mut conversions, ) .await; self.query_allowed_conversion( - client, + context.client(), asset_type, &mut conversions, ) @@ -1154,7 +1156,7 @@ impl ShieldedContext { (conversions.get_mut(&asset_type), at_target_asset_type) { display_line!( - StdIo, + context.io(), "converting current asset type to latest asset type..." ); // Not at the target asset type, not at the latest asset @@ -1162,7 +1164,7 @@ impl ShieldedContext { // current asset type to the latest // asset type. self.apply_conversion( - client, + context, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1176,7 +1178,7 @@ impl ShieldedContext { at_target_asset_type, ) { display_line!( - StdIo, + context.io(), "converting latest asset type to target asset type..." ); // Not at the target asset type, yet at the latest asset @@ -1184,7 +1186,7 @@ impl ShieldedContext { // from latest asset type to the target // asset type. self.apply_conversion( - client, + context, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1218,9 +1220,9 @@ impl ShieldedContext { /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to /// achieve the total value. 
- pub async fn collect_unspent_notes( + pub async fn collect_unspent_notes<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, vk: &ViewingKey, target: I128Sum, target_epoch: Epoch, @@ -1262,10 +1264,11 @@ impl ShieldedContext { .to_string(), ) })?; - let input = self.decode_all_amounts(client, pre_contr).await; + let input = + self.decode_all_amounts(context.client(), pre_contr).await; let (contr, proposed_convs) = self .compute_exchanged_amount( - client, + context, input, target_epoch, conversions.clone(), @@ -1403,31 +1406,31 @@ impl ShieldedContext { /// the epoch of the transaction or even before, so exchange all these /// amounts to the epoch of the transaction in order to get the value that /// would have been displayed in the epoch of the transaction. - pub async fn compute_exchanged_pinned_balance( + pub async fn compute_exchanged_pinned_balance<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, owner: PaymentAddress, viewing_key: &ViewingKey, ) -> Result<(MaspAmount, Epoch), Error> { // Obtain the balance that will be exchanged let (amt, ep) = - Self::compute_pinned_balance(client, owner, viewing_key).await?; - display_line!(IO, "Pinned balance: {:?}", amt); + Self::compute_pinned_balance(context.client(), owner, viewing_key) + .await?; + display_line!(context.io(), "Pinned balance: {:?}", amt); // Establish connection with which to do exchange rate queries - let amount = self.decode_all_amounts(client, amt).await; - display_line!(IO, "Decoded pinned balance: {:?}", amount); + let amount = self.decode_all_amounts(context.client(), amt).await; + display_line!(context.io(), "Decoded pinned balance: {:?}", amount); // Finally, exchange the balance to the transaction's epoch let computed_amount = self - .compute_exchanged_amount( - client, - amount, - ep, - BTreeMap::new(), - ) + .compute_exchanged_amount(context, amount, ep, BTreeMap::new()) .await? 
.0; - display_line!(IO, "Exchanged amount: {:?}", computed_amount); - Ok((self.decode_all_amounts(client, computed_amount).await, ep)) + display_line!(context.io(), "Exchanged amount: {:?}", computed_amount); + Ok(( + self.decode_all_amounts(context.client(), computed_amount) + .await, + ep, + )) } /// Convert an amount whose units are AssetTypes to one whose units are @@ -1567,7 +1570,7 @@ impl ShieldedContext { .shielded_mut() .await .collect_unspent_notes( - context.client(), + context, &to_viewing_key(&sk).vk, I128Sum::from_sum(amount), epoch, @@ -2131,6 +2134,7 @@ mod tests { pub mod fs { use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; + use async_trait::async_trait; use super::*; diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 8a40d8a8dd..eb8ebd8a11 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -27,10 +27,12 @@ use serde::Serialize; use crate::ledger::events::Event; use crate::ledger::queries::vp::pos::EnrichedBondsAndUnbondsDetails; use crate::ledger::queries::RPC; +use crate::ledger::Namada; use crate::proto::Tx; use crate::sdk::args::InputAmount; use crate::sdk::error; use crate::sdk::error::{EncodingError, Error, QueryError}; +use crate::sdk::queries::Client; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::Proof; use crate::tendermint_rpc::error::Error as TError; @@ -38,7 +40,7 @@ use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::control_flow::{time, Halt, TryHalt}; use crate::types::hash::Hash; -use crate::types::io::{Io, StdIo}; +use crate::types::io::Io; use crate::types::key::common; use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; use crate::types::{storage, token}; @@ -48,14 +50,11 @@ use crate::{display_line, edisplay_line}; /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. 
-pub async fn query_tx_status( - client: &C, +pub async fn query_tx_status<'a>( + context: &impl Namada<'a>, status: TxEventQuery<'_>, deadline: time::Instant, -) -> Halt -where - C: crate::ledger::queries::Client + Sync, -{ +) -> Halt { time::Sleep { strategy: time::LinearBackoff { delta: time::Duration::from_secs(1), @@ -63,7 +62,8 @@ where } .timeout(deadline, || async { tracing::debug!(query = ?status, "Querying tx status"); - let maybe_event = match query_tx_events(client, status).await { + let maybe_event = match query_tx_events(context.client(), status).await + { Ok(response) => response, Err(err) => { tracing::debug!( @@ -90,7 +90,7 @@ where .await .try_halt(|_| { edisplay_line!( - IO, + context.io(), "Transaction status query deadline of {deadline:?} exceeded" ); }) @@ -237,21 +237,19 @@ pub async fn query_conversion( } /// Query a wasm code hash -pub async fn query_wasm_code_hash< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +pub async fn query_wasm_code_hash<'a>( + context: &impl Namada<'a>, code_path: impl AsRef, ) -> Result { let hash_key = Key::wasm_hash(code_path.as_ref()); - match query_storage_value_bytes(client, &hash_key, None, false) + match query_storage_value_bytes(context.client(), &hash_key, None, false) .await? .0 { Some(hash) => Ok(Hash::try_from(&hash[..]).expect("Invalid code hash")), None => { edisplay_line!( - StdIo, + context.io(), "The corresponding wasm code of the code path {} doesn't \ exist on chain.", code_path.as_ref(), @@ -325,20 +323,16 @@ pub async fn query_storage_value_bytes< /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. 
-pub async fn query_storage_prefix< - C: crate::ledger::queries::Client + Sync, - IO: Io, - T, ->( - client: &C, +pub async fn query_storage_prefix<'a, 'b, N: Namada<'a>, T>( + context: &'b N, key: &storage::Key, -) -> Result>, error::Error> +) -> Result>, error::Error> where T: BorshDeserialize, { - let values = convert_response::( + let values = convert_response::( RPC.shell() - .storage_prefix(client, None, None, false, key) + .storage_prefix(context.client(), None, None, false, key) .await, )?; let decode = @@ -347,7 +341,7 @@ where ) { Err(err) => { edisplay_line!( - IO, + context.io(), "Skipping a value for key {}. Error in decoding: {}", key, err @@ -436,16 +430,18 @@ pub async fn query_tx_events( } /// Dry run a transaction -pub async fn dry_run_tx( - client: &C, +pub async fn dry_run_tx<'a, N: Namada<'a>>( + context: &N, tx_bytes: Vec, ) -> Result { let (data, height, prove) = (Some(tx_bytes), None, false); - let result = convert_response::( - RPC.shell().dry_run_tx(client, data, height, prove).await, + let result = convert_response::( + RPC.shell() + .dry_run_tx(context.client(), data, height, prove) + .await, )? 
.data; - display_line!(IO, "Dry-run result: {}", result); + display_line!(context.io(), "Dry-run result: {}", result); Ok(result) } @@ -786,15 +782,14 @@ pub async fn get_public_key_at( } /// Query a validator's unbonds for a given epoch -pub async fn query_and_print_unbonds< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +pub async fn query_and_print_unbonds<'a>( + context: &impl Namada<'a>, source: &Address, validator: &Address, ) -> Result<(), error::Error> { - let unbonds = query_unbond_with_slashing(client, source, validator).await?; - let current_epoch = query_epoch(client).await?; + let unbonds = + query_unbond_with_slashing(context.client(), source, validator).await?; + let current_epoch = query_epoch(context.client()).await?; let mut total_withdrawable = token::Amount::default(); let mut not_yet_withdrawable = HashMap::::new(); @@ -809,17 +804,17 @@ pub async fn query_and_print_unbonds< } if total_withdrawable != token::Amount::default() { display_line!( - StdIo, + context.io(), "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - display_line!(StdIo, "Current epoch: {current_epoch}.") + display_line!(context.io(), "Current epoch: {current_epoch}.") } for (withdraw_epoch, amount) in not_yet_withdrawable { display_line!( - StdIo, + context.io(), "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native() ); @@ -935,10 +930,8 @@ pub async fn enriched_bonds_and_unbonds< } /// Get the correct representation of the amount given the token type. 
-pub async fn validate_amount< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +pub async fn validate_amount<'a, N: Namada<'a>>( + context: &N, amount: InputAmount, token: &Address, force: bool, @@ -948,21 +941,21 @@ pub async fn validate_amount< InputAmount::Unvalidated(amt) => amt.canonical(), InputAmount::Validated(amt) => return Ok(amt), }; - let denom = match convert_response::>( - RPC.vp().token().denomination(client, token).await, + let denom = match convert_response::>( + RPC.vp().token().denomination(context.client(), token).await, )? { Some(denom) => Ok(denom), None => { if force { display_line!( - StdIo, + context.io(), "No denomination found for token: {token}, but --force \ was passed. Defaulting to the provided denomination." ); Ok(input_amount.denom) } else { display_line!( - StdIo, + context.io(), "No denomination found for token: {token}, the input \ arguments could not be parsed." ); @@ -974,7 +967,7 @@ pub async fn validate_amount< }?; if denom < input_amount.denom && !force { display_line!( - StdIo, + context.io(), "The input amount contained a higher precision than allowed by \ {token}." ); @@ -985,7 +978,7 @@ pub async fn validate_amount< } else { input_amount.increase_precision(denom).map_err(|_err| { display_line!( - StdIo, + context.io(), "The amount provided requires more the 256 bits to represent." ); Error::from(QueryError::General( @@ -998,10 +991,10 @@ pub async fn validate_amount< } /// Wait for a first block and node to be synced. 
-pub async fn wait_until_node_is_synched(client: &C) -> Halt<()> -where - C: crate::ledger::queries::Client + Sync, -{ +pub async fn wait_until_node_is_synched<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Halt<()> { let height_one = Height::try_from(1_u64).unwrap(); let try_count = Cell::new(1_u64); const MAX_TRIES: usize = 5; @@ -1023,7 +1016,7 @@ where return ControlFlow::Break(Ok(())); } display_line!( - IO, + io, " Waiting for {} ({}/{} tries)...", if is_at_least_height_one { "a first block" @@ -1038,7 +1031,7 @@ where } Err(e) => { edisplay_line!( - IO, + io, "Failed to query node status with error: {}", e ); @@ -1050,7 +1043,7 @@ where // maybe time out .try_halt(|_| { display_line!( - IO, + io, "Node is still catching up, wait for it to finish synching." ); })? @@ -1060,21 +1053,21 @@ where /// Look up the denomination of a token in order to make a correctly denominated /// amount. -pub async fn denominate_amount( - client: &C, +pub async fn denominate_amount<'a, N: Namada<'a>>( + context: &N, token: &Address, amount: token::Amount, ) -> DenominatedAmount { - let denom = convert_response::>( - RPC.vp().token().denomination(client, token).await, + let denom = convert_response::>( + RPC.vp().token().denomination(context.client(), token).await, ) .unwrap_or_else(|t| { - display_line!(StdIo, "Error in querying for denomination: {t}"); + display_line!(context.io(), "Error in querying for denomination: {t}"); None }) .unwrap_or_else(|| { display_line!( - StdIo, + context.io(), "No denomination found for token: {token}, defaulting to zero \ decimal places" ); @@ -1085,12 +1078,10 @@ pub async fn denominate_amount( /// Look up the denomination of a token in order to format it /// correctly as a string. 
-pub async fn format_denominated_amount< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +pub async fn format_denominated_amount<'a>( + context: &impl Namada<'a>, token: &Address, amount: token::Amount, ) -> String { - denominate_amount(client, token, amount).await.to_string() + denominate_amount(context, token, amount).await.to_string() } diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 048d72517f..4edc5e4f41 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -23,12 +23,14 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use zeroize::Zeroizing; -use crate::display_line; use super::masp::{ShieldedContext, ShieldedTransfer}; +use crate::display_line; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc_proto::google::protobuf::Any; use crate::ledger::parameters::storage as parameter_storage; +use crate::ledger::Namada; use crate::proto::{MaspBuilder, Section, Tx}; +use crate::sdk::args::SdkTypes; use crate::sdk::error::{EncodingError, Error, TxError}; use crate::sdk::masp::make_asset_type; use crate::sdk::rpc::{ @@ -44,7 +46,6 @@ pub use crate::sdk::wallet::store::AddressVpType; use crate::sdk::wallet::{Wallet, WalletIo}; use crate::sdk::{args, rpc}; use crate::types::io::*; -use crate::sdk::args::SdkTypes; use crate::types::key::*; use crate::types::masp::{ExtendedViewingKey, PaymentAddress}; use crate::types::storage::Epoch; @@ -54,7 +55,6 @@ use crate::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use crate::types::transaction::pos::InitValidator; -use crate::ledger::Namada; use crate::types::transaction::Fee; #[cfg(feature = "std")] @@ -83,27 +83,28 @@ pub struct SigningTxData { /// for it from the wallet. If the keypair is encrypted but a password is not /// supplied, then it is interactively prompted. Errors if the key cannot be /// found or loaded. 
-pub async fn find_pk( - client: &C, - wallet: &mut Wallet, +pub async fn find_pk<'a>( + context: &impl Namada<'a>, addr: &Address, password: Option>, ) -> Result { match addr { Address::Established(_) => { display_line!( - StdIo, + context.io(), "Looking-up public key of {} from the ledger...", addr.encode() ); - rpc::get_public_key_at(client, addr, 0) + rpc::get_public_key_at(context.client(), addr, 0) .await? .ok_or(Error::Other(format!( "No public key found for the address {}", addr.encode() ))) } - Address::Implicit(ImplicitAddress(pkh)) => Ok(wallet + Address::Implicit(ImplicitAddress(pkh)) => Ok(context + .wallet_mut() + .await .find_key_by_pkh(pkh, password) .map_err(|err| { Error::Other(format!( @@ -133,6 +134,21 @@ pub fn find_key_by_pk( // We already know the secret key corresponding to the MASP sentinal key Ok(masp_tx_key()) } else { + // Try to get the signer from the signing-keys argument + for signing_key in &args.signing_keys { + if signing_key.ref_to() == *public_key { + return Ok(signing_key.clone()); + } + } + // Try to get the signer from the wrapper-fee-payer argument + match &args.wrapper_fee_payer { + Some(wrapper_fee_payer) + if &wrapper_fee_payer.ref_to() == public_key => + { + return Ok(wrapper_fee_payer.clone()); + } + _ => {} + } // Otherwise we need to search the wallet for the secret key wallet .find_key_by_pk(public_key, args.password.clone()) @@ -171,13 +187,7 @@ pub async fn tx_signers<'a>( Some(signer) if signer == masp() => Ok(vec![masp_tx_key().ref_to()]), Some(signer) => Ok(vec![ - find_pk( - context.client(), - *context.wallet_mut().await, - &signer, - args.password.clone(), - ) - .await?, + find_pk(context, &signer, args.password.clone()).await?, ]), None => other_err( "All transactions must be signed; please either specify the key \ @@ -355,14 +365,10 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( }; let fee_amount = match args.fee_amount { Some(amount) => { - let validated_fee_amount = validate_amount( - context.client(), - 
amount, - &args.fee_token, - args.force, - ) - .await - .expect("Expected to be able to validate fee"); + let validated_fee_amount = + validate_amount(context, amount, &args.fee_token, args.force) + .await + .expect("Expected to be able to validate fee"); let amount = Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); @@ -372,7 +378,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( } else if !args.force { // Update the fee amount if it's not enough display_line!( - StdIo, + context.io(), "The provided gas price {} is less than the minimum \ amount required {}, changing it to match the minimum", amount.to_string_native(), @@ -513,14 +519,14 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( let token_addr = args.fee_token.clone(); if !args.force { let fee_amount = format_denominated_amount( - context.client(), + context, &token_addr, total_fee, ) .await; let balance = format_denominated_amount( - context.client(), + context, &token_addr, updated_balance, ) @@ -539,7 +545,7 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( _ => { if args.fee_unshield.is_some() { display_line!( - StdIo, + context.io(), "Enough transparent balance to pay fees: the fee \ unshielding spending key will be ignored" ); @@ -607,10 +613,8 @@ fn make_ledger_amount_addr( /// Adds a Ledger output line describing a given transaction amount and asset /// type -async fn make_ledger_amount_asset< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +async fn make_ledger_amount_asset<'a>( + context: &impl Namada<'a>, tokens: &HashMap, output: &mut Vec, amount: u64, @@ -621,8 +625,7 @@ async fn make_ledger_amount_asset< if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees let formatted_amt = - format_denominated_amount(client, token, amount.into()) - .await; + format_denominated_amount(context, token, amount.into()).await; if let Some(token) = tokens.get(token) { output .push( @@ -706,10 +709,8 @@ fn format_outputs(output: &mut 
Vec) { /// Adds a Ledger output for the sender and destination for transparent and MASP /// transactions -pub async fn make_ledger_masp_endpoints< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +pub async fn make_ledger_masp_endpoints<'a>( + context: &impl Namada<'a>, tokens: &HashMap, output: &mut Vec, transfer: &Transfer, @@ -732,7 +733,7 @@ pub async fn make_ledger_masp_endpoints< let vk = ExtendedViewingKey::from(*sapling_input.key()); output.push(format!("Sender : {}", vk)); make_ledger_amount_asset( - client, + context, tokens, output, sapling_input.value(), @@ -759,7 +760,7 @@ pub async fn make_ledger_masp_endpoints< let pa = PaymentAddress::from(sapling_output.address()); output.push(format!("Destination : {}", pa)); make_ledger_amount_asset( - client, + context, tokens, output, sapling_output.value(), @@ -839,31 +840,24 @@ pub async fn to_ledger_vector<'a>( tx: &Tx, ) -> Result { let init_account_hash = - query_wasm_code_hash(context.client(), TX_INIT_ACCOUNT_WASM).await?; + query_wasm_code_hash(context, TX_INIT_ACCOUNT_WASM).await?; let init_validator_hash = - query_wasm_code_hash(context.client(), TX_INIT_VALIDATOR_WASM).await?; + query_wasm_code_hash(context, TX_INIT_VALIDATOR_WASM).await?; let init_proposal_hash = - query_wasm_code_hash(context.client(), TX_INIT_PROPOSAL).await?; + query_wasm_code_hash(context, TX_INIT_PROPOSAL).await?; let vote_proposal_hash = - query_wasm_code_hash(context.client(), TX_VOTE_PROPOSAL).await?; - let reveal_pk_hash = - query_wasm_code_hash(context.client(), TX_REVEAL_PK).await?; + query_wasm_code_hash(context, TX_VOTE_PROPOSAL).await?; + let reveal_pk_hash = query_wasm_code_hash(context, TX_REVEAL_PK).await?; let update_account_hash = - query_wasm_code_hash(context.client(), TX_UPDATE_ACCOUNT_WASM).await?; - let transfer_hash = - query_wasm_code_hash(context.client(), TX_TRANSFER_WASM).await?; - let ibc_hash = query_wasm_code_hash(context.client(), TX_IBC_WASM).await?; - let bond_hash = - 
query_wasm_code_hash(context.client(), TX_BOND_WASM).await?; - let unbond_hash = - query_wasm_code_hash(context.client(), TX_UNBOND_WASM).await?; - let withdraw_hash = - query_wasm_code_hash(context.client(), TX_WITHDRAW_WASM).await?; + query_wasm_code_hash(context, TX_UPDATE_ACCOUNT_WASM).await?; + let transfer_hash = query_wasm_code_hash(context, TX_TRANSFER_WASM).await?; + let ibc_hash = query_wasm_code_hash(context, TX_IBC_WASM).await?; + let bond_hash = query_wasm_code_hash(context, TX_BOND_WASM).await?; + let unbond_hash = query_wasm_code_hash(context, TX_UNBOND_WASM).await?; + let withdraw_hash = query_wasm_code_hash(context, TX_WITHDRAW_WASM).await?; let change_commission_hash = - query_wasm_code_hash(context.client(), TX_CHANGE_COMMISSION_WASM) - .await?; - let user_hash = - query_wasm_code_hash(context.client(), VP_USER_WASM).await?; + query_wasm_code_hash(context, TX_CHANGE_COMMISSION_WASM).await?; + let user_hash = query_wasm_code_hash(context, VP_USER_WASM).await?; // To facilitate lookups of human-readable token names let wallet = context.wallet().await; @@ -1160,7 +1154,7 @@ pub async fn to_ledger_vector<'a>( tv.output.push("Type : Transfer".to_string()); make_ledger_masp_endpoints( - context.client(), + context, &tokens, &mut tv.output, &transfer, @@ -1169,7 +1163,7 @@ pub async fn to_ledger_vector<'a>( ) .await; make_ledger_masp_endpoints( - context.client(), + context, &tokens, &mut tv.output_expert, &transfer, @@ -1342,13 +1336,13 @@ pub async fn to_ledger_vector<'a>( if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); let gas_limit = format_denominated_amount( - context.client(), + context, &gas_token, Amount::from(wrapper.gas_limit), ) .await; let fee_amount_per_gas_unit = format_denominated_amount( - context.client(), + context, &gas_token, wrapper.fee.amount_per_gas_unit, ) diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index e6272c0d65..66b8847b17 100644 --- a/shared/src/sdk/tx.rs +++ 
b/shared/src/sdk/tx.rs @@ -42,23 +42,23 @@ use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; use crate::ledger::ibc::storage::ibc_denom_key; -use crate::sdk::signing::SigningTxData; -use crate::sdk::masp::TransferErr::Build; -use crate::sdk::masp::{ShieldedContext, ShieldedTransfer}; -use crate::sdk::rpc::{ - self, format_denominated_amount, validate_amount, TxBroadcastData, - TxResponse, query_wasm_code_hash -}; -use crate::sdk::wallet::{Wallet, WalletIo}; use crate::ledger::Namada; use crate::proto::{MaspBuilder, Tx}; use crate::sdk::args::{self, InputAmount}; use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; -use crate::sdk::signing::{self, TxSourcePostBalance}; +use crate::sdk::masp::TransferErr::Build; +use crate::sdk::masp::{ShieldedContext, ShieldedTransfer}; +use crate::sdk::queries::Client; +use crate::sdk::rpc::{ + self, format_denominated_amount, query_wasm_code_hash, validate_amount, + TxBroadcastData, TxResponse, +}; +use crate::sdk::signing::{self, SigningTxData, TxSourcePostBalance}; +use crate::sdk::wallet::WalletIo; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; use crate::types::control_flow::{time, ProceedOrElse}; -use crate::types::io::{Io, StdIo}; +use crate::types::io::Io; use crate::types::key::*; use crate::types::masp::TransferTarget; use crate::types::storage::Epoch; @@ -133,7 +133,7 @@ impl ProcessTxResponse { } /// Build and dump a transaction either to file or to screen -pub fn dump_tx(args: &args::Tx, tx: Tx) { +pub fn dump_tx(io: &IO, args: &args::Tx, tx: Tx) { let tx_id = tx.header_hash(); let serialized_tx = tx.serialize(); match args.output_folder.to_owned() { @@ -144,14 +144,14 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { serde_json::to_writer_pretty(out, &serialized_tx) .expect("Should be able to write to file."); display_line!( - IO, + io, "Transaction 
serialized to {}.", tx_path.to_string_lossy() ); } None => { - display_line!(IO, "Below the serialized transaction: \n"); - display_line!(IO, "{}", serialized_tx) + display_line!(io, "Below the serialized transaction: \n"); + display_line!(io, "{}", serialized_tx) } } } @@ -169,15 +169,8 @@ pub async fn prepare_tx<'a>( if !args.dry_run { let epoch = rpc::query_epoch(context.client()).await?; - signing::wrap_tx( - context, - tx, - args, - tx_source_balance, - epoch, - fee_payer, - ) - .await + signing::wrap_tx(context, tx, args, tx_source_balance, epoch, fee_payer) + .await } else { Ok(None) } @@ -185,12 +178,8 @@ pub async fn prepare_tx<'a>( /// Submit transaction and wait for result. Returns a list of addresses /// initialized in the transaction if any. In dry run, this is always empty. -pub async fn process_tx< - C: crate::sdk::queries::Client + Sync, - U: WalletIo, ->( - client: &C, - wallet: &mut Wallet, +pub async fn process_tx<'a>( + context: &impl Namada<'a>, args: &args::Tx, tx: Tx, ) -> Result { @@ -205,7 +194,7 @@ pub async fn process_tx< // println!("HTTP request body: {}", request_body); if args.dry_run || args.dry_run_wrapper { - expect_dry_broadcast::<_, StdIo>(TxBroadcastData::DryRun(tx), client).await + expect_dry_broadcast(TxBroadcastData::DryRun(tx), context).await } else { // We use this to determine when the wrapper tx makes it on-chain let wrapper_hash = tx.header_hash().to_string(); @@ -225,14 +214,14 @@ pub async fn process_tx< // of masp epoch Either broadcast or submit transaction and // collect result into sum type if args.broadcast_only { - broadcast_tx::<_, StdIo>(client, &to_broadcast) + broadcast_tx(context, &to_broadcast) .await .map(ProcessTxResponse::Broadcast) } else { - match submit_tx::<_, StdIo>(client, to_broadcast).await { + match submit_tx(context, to_broadcast).await { Ok(x) => { - save_initialized_accounts::( - wallet, + save_initialized_accounts( + context, args, x.initialized_accounts.clone(), ) @@ -292,8 +281,8 @@ pub 
async fn build_reveal_pk<'a>( /// the tx has been successfully included into the mempool of a node /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( - rpc_cli: &C, +pub async fn broadcast_tx<'a>( + context: &impl Namada<'a>, to_broadcast: &TxBroadcastData, ) -> Result { let (tx, wrapper_tx_hash, decrypted_tx_hash) = match to_broadcast { @@ -313,21 +302,29 @@ pub async fn broadcast_tx( // TODO: configure an explicit timeout value? we need to hack away at // `tendermint-rs` for this, which is currently using a hard-coded 30s // timeout. - let response = - lift_rpc_error(rpc_cli.broadcast_tx_sync(tx.to_bytes().into()).await)?; + let response = lift_rpc_error( + context + .client() + .broadcast_tx_sync(tx.to_bytes().into()) + .await, + )?; if response.code == 0.into() { - display_line!(IO, "Transaction added to mempool: {:?}", response); + display_line!( + context.io(), + "Transaction added to mempool: {:?}", + response + ); // Print the transaction identifiers to enable the extraction of // acceptance/application results later { display_line!( - IO, + context.io(), "Wrapper transaction hash: {:?}", wrapper_tx_hash ); display_line!( - IO, + context.io(), "Inner transaction hash: {:?}", decrypted_tx_hash ); @@ -350,13 +347,10 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. 
/// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( - client: &C, +pub async fn submit_tx<'a>( + context: &impl Namada<'a>, to_broadcast: TxBroadcastData, -) -> Result -where - C: crate::sdk::queries::Client + Sync, -{ +) -> Result { let (_, wrapper_hash, decrypted_hash) = match &to_broadcast { TxBroadcastData::Live { tx, @@ -367,7 +361,7 @@ where }?; // Broadcast the supplied transaction - broadcast_tx::<_, IO>(client, &to_broadcast).await?; + broadcast_tx(context, &to_broadcast).await?; let deadline = time::Instant::now() + time::Duration::from_secs( @@ -382,10 +376,9 @@ where let parsed = { let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); - let event = - rpc::query_tx_status::<_, IO>(client, wrapper_query, deadline) - .await - .proceed_or(TxError::AcceptTimeout)?; + let event = rpc::query_tx_status(context, wrapper_query, deadline) + .await + .proceed_or(TxError::AcceptTimeout)?; let parsed = TxResponse::from_event(event); let tx_to_str = |parsed| { serde_json::to_string_pretty(parsed).map_err(|err| { @@ -393,7 +386,7 @@ where }) }; display_line!( - IO, + context.io(), "Transaction accepted with result: {}", tx_to_str(&parsed)? ); @@ -404,16 +397,13 @@ where // payload makes its way onto the blockchain let decrypted_query = rpc::TxEventQuery::Applied(decrypted_hash.as_str()); - let event = rpc::query_tx_status::<_, IO>( - client, - decrypted_query, - deadline, - ) - .await - .proceed_or(TxError::AppliedTimeout)?; + let event = + rpc::query_tx_status(context, decrypted_query, deadline) + .await + .proceed_or(TxError::AppliedTimeout)?; let parsed = TxResponse::from_event(event); display_line!( - IO, + context.io(), "Transaction applied with result: {}", tx_to_str(&parsed)? ); @@ -450,8 +440,8 @@ pub fn decode_component( } /// Save accounts initialized from a tx into the wallet, if any. 
-pub async fn save_initialized_accounts( - wallet: &mut Wallet, +pub async fn save_initialized_accounts<'a, N: Namada<'a>>( + context: &N, args: &args::Tx, initialized_accounts: Vec
, ) { @@ -459,7 +449,7 @@ pub async fn save_initialized_accounts( if len != 0 { // Store newly initialized account addresses in the wallet display_line!( - IO, + context.io(), "The transaction initialized {} new account{}", len, if len == 1 { "" } else { "s" } @@ -480,10 +470,10 @@ pub async fn save_initialized_accounts( format!("{}{}", initialized_account_alias, ix).into() } } - None => U::read_alias(&encoded).into(), + None => N::WalletUtils::read_alias(&encoded).into(), }; let alias = alias.into_owned(); - let added = wallet.add_address( + let added = context.wallet_mut().await.add_address( alias.clone(), address.clone(), args.wallet_alias_force, @@ -491,14 +481,18 @@ pub async fn save_initialized_accounts( match added { Some(new_alias) if new_alias != encoded => { display_line!( - IO, + context.io(), "Added alias {} for address {}.", new_alias, encoded ); } _ => { - display_line!(IO, "No alias added for address {}.", encoded) + display_line!( + context.io(), + "No alias added for address {}.", + encoded + ) } }; } @@ -532,7 +526,7 @@ pub async fn build_validator_commission_change<'a>( if rpc::is_validator(context.client(), &validator).await? { if *rate < Dec::zero() || *rate > Dec::one() { edisplay_line!( - StdIo, + context.io(), "Invalid new commission rate, received {}", rate ); @@ -556,7 +550,7 @@ pub async fn build_validator_commission_change<'a>( > max_commission_change_per_epoch { edisplay_line!( - StdIo, + context.io(), "New rate is too large of a change with respect to \ the predecessor epoch in which the rate will take \ effect." 
@@ -569,14 +563,17 @@ pub async fn build_validator_commission_change<'a>( } } None => { - edisplay_line!(StdIo, "Error retrieving from storage"); + edisplay_line!(context.io(), "Error retrieving from storage"); if !tx_args.force { return Err(Error::from(TxError::Retrieval)); } } } } else { - edisplay_line!(StdIo, "The given address {validator} is not a validator."); + edisplay_line!( + context.io(), + "The given address {validator} is not a validator." + ); if !tx_args.force { return Err(Error::from(TxError::InvalidValidatorAddress( validator, @@ -622,7 +619,11 @@ pub async fn build_update_steward_commission<'a>( .await?; if !rpc::is_steward(context.client(), steward).await && !tx_args.force { - edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); + edisplay_line!( + context.io(), + "The given address {} is not a steward.", + &steward + ); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -631,7 +632,7 @@ pub async fn build_update_steward_commission<'a>( if !commission.is_valid() && !tx_args.force { edisplay_line!( - StdIo, + context.io(), "The sum of all percentage must not be greater than 1." ); return Err(Error::from(TxError::InvalidStewardCommission( @@ -676,7 +677,11 @@ pub async fn build_resign_steward<'a>( .await?; if !rpc::is_steward(context.client(), steward).await && !tx_args.force { - edisplay_line!(StdIo, "The given address {} is not a steward.", &steward); + edisplay_line!( + context.io(), + "The given address {} is not a steward.", + &steward + ); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -713,7 +718,7 @@ pub async fn build_unjail_validator<'a>( if !rpc::is_validator(context.client(), validator).await? 
{ edisplay_line!( - StdIo, + context.io(), "The given address {} is not a validator.", &validator ); @@ -741,7 +746,7 @@ pub async fn build_unjail_validator<'a>( })?; if validator_state_at_pipeline != ValidatorState::Jailed { edisplay_line!( - StdIo, + context.io(), "The given validator address {} is not jailed at the pipeline \ epoch when it would be restored to one of the validator sets.", &validator @@ -766,7 +771,7 @@ pub async fn build_unjail_validator<'a>( last_slash_epoch + params.slash_processing_epoch_offset(); if current_epoch < eligible_epoch { edisplay_line!( - StdIo, + context.io(), "The given validator address {} is currently frozen and \ not yet eligible to be unjailed.", &validator @@ -827,12 +832,9 @@ pub async fn build_withdraw<'a>( let epoch = rpc::query_epoch(context.client()).await?; - let validator = known_validator_or_err( - validator.clone(), - tx_args.force, - context.client(), - ) - .await?; + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; let source = source.clone(); @@ -848,27 +850,25 @@ pub async fn build_withdraw<'a>( if tokens.is_zero() { edisplay_line!( - StdIo, + context.io(), "There are no unbonded bonds ready to withdraw in the current \ epoch {}.", epoch ); - rpc::query_and_print_unbonds( - context.client(), - &bond_source, - &validator, - ) - .await?; + rpc::query_and_print_unbonds(context, &bond_source, &validator).await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } } else { display_line!( - StdIo, + context.io(), "Found {} tokens that can be withdrawn.", tokens.to_string_native() ); - display_line!(StdIo, "Submitting transaction to withdraw them..."); + display_line!( + context.io(), + "Submitting transaction to withdraw them..." 
+ ); } let data = pos::Withdraw { validator, source }; @@ -917,24 +917,21 @@ pub async fn build_unbond<'a>( let bond_source = source.clone().unwrap_or_else(|| validator.clone()); if !tx_args.force { - known_validator_or_err( - validator.clone(), - tx_args.force, - context.client(), - ) - .await?; + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; let bond_amount = - rpc::query_bond(context.client(), &bond_source, validator, None).await?; + rpc::query_bond(context.client(), &bond_source, validator, None) + .await?; display_line!( - StdIo, + context.io(), "Bond amount available for unbonding: {} NAM", bond_amount.to_string_native() ); if *amount > bond_amount { edisplay_line!( - StdIo, + context.io(), "The total bonds of the source {} is lower than the amount to \ be unbonded. Amount to unbond is {} and the total bonds is \ {}.", @@ -986,8 +983,8 @@ pub async fn build_unbond<'a>( } /// Query the unbonds post-tx -pub async fn query_unbonds( - client: &C, +pub async fn query_unbonds<'a>( + context: &impl Namada<'a>, args: args::Unbond, latest_withdrawal_pre: Option<(Epoch, token::Amount)>, ) -> Result<()> { @@ -996,9 +993,12 @@ pub async fn query_unbonds( let bond_source = source.clone().unwrap_or_else(|| args.validator.clone()); // Query the unbonds post-tx - let unbonds = - rpc::query_unbond_with_slashing(client, &bond_source, &args.validator) - .await?; + let unbonds = rpc::query_unbond_with_slashing( + context.client(), + &bond_source, + &args.validator, + ) + .await?; let mut withdrawable = BTreeMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { let to_withdraw = withdrawable.entry(withdraw_epoch).or_default(); @@ -1016,7 +1016,7 @@ pub async fn query_unbonds( std::cmp::Ordering::Less => { if args.tx.force { edisplay_line!( - IO, + context.io(), "Unexpected behavior reading the unbonds data has \ occurred" ); @@ -1026,7 +1026,7 @@ pub async fn query_unbonds( } std::cmp::Ordering::Equal => { 
display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", (latest_withdraw_amount_post - latest_withdraw_amount_pre) .to_string_native(), @@ -1035,7 +1035,7 @@ pub async fn query_unbonds( } std::cmp::Ordering::Greater => { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -1044,7 +1044,7 @@ pub async fn query_unbonds( } } else { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -1075,20 +1075,15 @@ pub async fn build_bond<'a>( ) .await?; - let validator = known_validator_or_err( - validator.clone(), - tx_args.force, - context.client(), - ) - .await?; + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; // Check that the source address exists on chain let source = match source.clone() { - Some(source) => { - source_exists_or_err(source, tx_args.force, context.client()) - .await - .map(Some) - } + Some(source) => source_exists_or_err(source, tx_args.force, context) + .await + .map(Some), None => Ok(source.clone()), }?; // Check bond's source (source for delegation or validator for self-bonds) @@ -1103,7 +1098,7 @@ pub async fn build_bond<'a>( *amount, balance_key, tx_args.force, - context.client(), + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1376,23 +1371,16 @@ pub async fn build_ibc_transfer<'a>( ) .await?; // Check that the source address exists on chain - let source = source_exists_or_err( - args.source.clone(), - args.tx.force, - context.client(), - ) - .await?; + let source = + source_exists_or_err(args.source.clone(), args.tx.force, context) + .await?; // We cannot check the receiver // validate the amount given - let validated_amount = validate_amount( - context.client(), - args.amount, - &args.token, - args.tx.force, - ) - .await 
- .expect("expected to validate amount"); + let validated_amount = + validate_amount(context, args.amount, &args.token, args.tx.force) + .await + .expect("expected to validate amount"); if validated_amount.canonical().denom.0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", @@ -1409,7 +1397,7 @@ pub async fn build_ibc_transfer<'a>( validated_amount.amount, balance_key, args.tx.force, - context.client(), + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1418,12 +1406,10 @@ pub async fn build_ibc_transfer<'a>( token: args.token.clone(), }); - let tx_code_hash = query_wasm_code_hash( - context.client(), - args.tx_code_path.to_str().unwrap(), - ) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = + query_wasm_code_hash(context, args.tx_code_path.to_str().unwrap()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; let ibc_denom = match &args.token { Address::Internal(InternalAddress::IbcToken(hash)) => { @@ -1551,10 +1537,9 @@ where let mut tx_builder = Tx::new(chain_id, tx_args.expiration); - let tx_code_hash = - query_wasm_code_hash(context.client(), path.to_string_lossy()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = query_wasm_code_hash(context, path.to_string_lossy()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; on_tx(&mut tx_builder, &mut data)?; @@ -1645,18 +1630,15 @@ pub async fn build_transfer<'a, N: Namada<'a>>( let token = args.token.clone(); // Check that the source address exists on chain - source_exists_or_err(source.clone(), args.tx.force, context.client()) - .await?; + source_exists_or_err(source.clone(), args.tx.force, context).await?; // Check that the target address exists on chain - target_exists_or_err(target.clone(), args.tx.force, context.client()) - .await?; + target_exists_or_err(target.clone(), args.tx.force, context).await?; // 
Check source balance let balance_key = token::balance_key(&token, &source); // validate the amount given let validated_amount = - validate_amount(context.client(), args.amount, &token, args.tx.force) - .await?; + validate_amount(context, args.amount, &token, args.tx.force).await?; args.amount = InputAmount::Validated(validated_amount); let post_balance = check_balance_too_low_err( @@ -1665,7 +1647,7 @@ pub async fn build_transfer<'a, N: Namada<'a>>( validated_amount.amount, balance_key, args.tx.force, - context.client(), + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1810,8 +1792,7 @@ pub async fn build_init_account<'a>( let signing_data = signing::aux_signing_data(context, tx_args, None, None).await?; - let vp_code_hash = - query_wasm_code_hash_buf(context.client(), vp_code_path).await?; + let vp_code_hash = query_wasm_code_hash_buf(context, vp_code_path).await?; let threshold = match threshold { Some(threshold) => *threshold, @@ -1882,8 +1863,7 @@ pub async fn build_update_account<'a>( let vp_code_hash = match vp_code_path { Some(code_path) => { - let vp_hash = - query_wasm_code_hash_buf(context.client(), code_path).await?; + let vp_hash = query_wasm_code_hash_buf(context, code_path).await?; Some(vp_hash) } None => None, @@ -1946,7 +1926,7 @@ pub async fn build_custom<'a>( })? 
} else { let tx_code_hash = query_wasm_code_hash_buf( - context.client(), + context, code_path .as_ref() .ok_or(Error::Other("No code path supplied".to_string()))?, @@ -1971,16 +1951,13 @@ pub async fn build_custom<'a>( Ok((tx, signing_data, epoch)) } -async fn expect_dry_broadcast< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn expect_dry_broadcast<'a>( to_broadcast: TxBroadcastData, - client: &C, + context: &impl Namada<'a>, ) -> Result { match to_broadcast { TxBroadcastData::DryRun(tx) => { - rpc::dry_run_tx::<_, IO>(client, tx.to_bytes()).await?; + rpc::dry_run_tx(context, tx.to_bytes()).await?; Ok(ProcessTxResponse::DryRun) } TxBroadcastData::Live { @@ -1998,19 +1975,17 @@ fn lift_rpc_error(res: std::result::Result) -> Result { /// Returns the given validator if the given address is a validator, /// otherwise returns an error, force forces the address through even /// if it isn't a validator -async fn known_validator_or_err< - C: crate::ledger::queries::Client + Sync, ->( +async fn known_validator_or_err<'a>( validator: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ // Check that the validator address exists on chain - let is_validator = rpc::is_validator(client, &validator).await?; + let is_validator = rpc::is_validator(context.client(), &validator).await?; if !is_validator { if force { edisplay_line!( - StdIo, + context.io(), "The address {} doesn't belong to any known validator account.", validator ); @@ -2026,21 +2001,20 @@ async fn known_validator_or_err< /// general pattern for checking if an address exists on the chain, or /// throwing an error if it's not forced. Takes a generic error /// message and the error type. -async fn address_exists_or_err( +async fn address_exists_or_err<'a, F>( addr: Address, force: bool, - client: &C, + context: &impl Namada<'a>, message: String, err: F, ) -> Result
where - C: crate::sdk::queries::Client + Sync, F: FnOnce(Address) -> Error, { - let addr_exists = rpc::known_address::(client, &addr).await?; + let addr_exists = rpc::known_address(context.client(), &addr).await?; if !addr_exists { if force { - edisplay_line!(StdIo, "{}", message); + edisplay_line!(context.io(), "{}", message); Ok(addr) } else { Err(err(addr)) @@ -2053,16 +2027,14 @@ where /// Returns the given source address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn source_exists_or_err< - C: crate::ledger::queries::Client + Sync, ->( +async fn source_exists_or_err<'a>( token: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ let message = format!("The source address {} doesn't exist on chain.", token); - address_exists_or_err(token, force, client, message, |err| { + address_exists_or_err(token, force, context, message, |err| { Error::from(TxError::SourceDoesNotExist(err)) }) .await @@ -2071,16 +2043,14 @@ async fn source_exists_or_err< /// Returns the given target address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn target_exists_or_err< - C: crate::ledger::queries::Client + Sync, ->( +async fn target_exists_or_err<'a>( token: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ let message = format!("The target address {} doesn't exist on chain.", token); - address_exists_or_err(token, force, client, message, |err| { + address_exists_or_err(token, force, context, message, |err| { Error::from(TxError::TargetLocationDoesNotExist(err)) }) .await @@ -2089,38 +2059,34 @@ async fn target_exists_or_err< /// Checks the balance at the given address is enough to transfer the /// given amount, along with the balance even existing. Force /// overrides this. Returns the updated balance for fee check if necessary -async fn check_balance_too_low_err< - C: crate::ledger::queries::Client + Sync, ->( +async fn check_balance_too_low_err<'a, N: Namada<'a>>( token: &Address, source: &Address, amount: token::Amount, balance_key: storage::Key, force: bool, - client: &C, + context: &N, ) -> Result { - match rpc::query_storage_value::(client, &balance_key) - .await + match rpc::query_storage_value::( + context.client(), + &balance_key, + ) + .await { Ok(balance) => match balance.checked_sub(amount) { Some(diff) => Ok(diff), None => { if force { edisplay_line!( - StdIo, + context.io(), "The balance of the source {} of token {} is lower \ than the amount to be transferred. 
Amount to \ transfer is {} and the balance is {}.", source, token, - format_denominated_amount( - client, token, amount - ) - .await, - format_denominated_amount( - client, token, balance - ) - .await, + format_denominated_amount(context, token, amount).await, + format_denominated_amount(context, token, balance) + .await, ); Ok(token::Amount::default()) } else { @@ -2138,7 +2104,7 @@ async fn check_balance_too_low_err< )) => { if force { edisplay_line!( - StdIo, + context.io(), "No balance found for the source {} of token {}", source, token @@ -2159,13 +2125,14 @@ async fn check_balance_too_low_err< #[allow(dead_code)] fn validate_untrusted_code_err( + io: &IO, vp_code: &Vec, force: bool, ) -> Result<()> { if let Err(err) = vm::validate_untrusted_wasm(vp_code) { if force { edisplay_line!( - IO, + io, "Validity predicate code validation failed with {}", err ); @@ -2177,13 +2144,11 @@ fn validate_untrusted_code_err( Ok(()) } } -async fn query_wasm_code_hash_buf< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, +async fn query_wasm_code_hash_buf<'a>( + context: &impl Namada<'a>, path: &Path, ) -> Result { - query_wasm_code_hash(client, path.to_string_lossy()).await + query_wasm_code_hash(context, path.to_string_lossy()).await } /// A helper for [`fn build`] that can be used for `on_tx` arg that does nothing diff --git a/shared/src/types/io.rs b/shared/src/types/io.rs index 007d5acd93..f100ca8433 100644 --- a/shared/src/types/io.rs +++ b/shared/src/types/io.rs @@ -11,20 +11,21 @@ impl Io for StdIo {} #[async_trait::async_trait(?Send)] #[allow(missing_docs)] pub trait Io { - fn print(output: impl AsRef) { + fn print(&self, output: impl AsRef) { print!("{}", output.as_ref()); } - fn flush() { + fn flush(&self) { use std::io::Write; std::io::stdout().flush().unwrap(); } - fn println(output: impl AsRef) { + fn println(&self, output: impl AsRef) { println!("{}", output.as_ref()); } fn write( + &self, mut writer: W, output: impl AsRef, ) -> std::io::Result<()> { 
@@ -32,17 +33,18 @@ pub trait Io { } fn writeln( + &self, mut writer: W, output: impl AsRef, ) -> std::io::Result<()> { writeln!(writer, "{}", output.as_ref()) } - fn eprintln(output: impl AsRef) { + fn eprintln(&self, output: impl AsRef) { eprintln!("{}", output.as_ref()); } - async fn read() -> std::io::Result { + async fn read(&self) -> std::io::Result { #[cfg(not(target_family = "wasm"))] { read_aux(tokio::io::stdin()).await @@ -53,7 +55,7 @@ pub trait Io { } } - async fn prompt(question: impl AsRef) -> String { + async fn prompt(&self, question: impl AsRef) -> String { #[cfg(not(target_family = "wasm"))] { prompt_aux( @@ -111,14 +113,14 @@ where /// [`Io::print`] #[macro_export] macro_rules! display { - ($io:ty) => { - <$io>::print("") + ($io:expr) => { + $io.print("") }; - ($io:ty, $w:expr; $($args:tt)*) => { - <$io>::write($w, format_args!($($args)*).to_string()) + ($io:expr, $w:expr; $($args:tt)*) => { + $io.write($w, format_args!($($args)*).to_string()) }; - ($io:ty,$($args:tt)*) => { - <$io>::print(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.print(format_args!($($args)*).to_string()) }; } @@ -126,14 +128,14 @@ macro_rules! display { /// [`Io::println`] and [`Io::writeln`] #[macro_export] macro_rules! display_line { - ($io:ty) => { - <$io>::println("") + ($io:expr) => { + $io.println("") }; - ($io:ty, $w:expr; $($args:tt)*) => { - <$io>::writeln($w, format_args!($($args)*).to_string()) + ($io:expr, $w:expr; $($args:tt)*) => { + $io.writeln($w, format_args!($($args)*).to_string()) }; - ($io:ty,$($args:tt)*) => { - <$io>::println(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.println(format_args!($($args)*).to_string()) }; } @@ -141,8 +143,8 @@ macro_rules! display_line { /// [`Io::eprintln`] #[macro_export] macro_rules! 
edisplay_line { - ($io:ty,$($args:tt)*) => { - <$io>::eprintln(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.eprintln(format_args!($($args)*).to_string()) }; } @@ -150,7 +152,7 @@ macro_rules! edisplay_line { /// A convenience macro for formatting the user prompt before /// forwarding it to the [`Io::prompt`] method. macro_rules! prompt { - ($io:ty,$($arg:tt)*) => {{ - <$io>::prompt(format!("{}", format_args!($($arg)*))) + ($io:expr,$($arg:tt)*) => {{ + $io.prompt(format!("{}", format_args!($($arg)*))) }} } From 68a33934cd304e90a8b5a7ae64633a98246474fa Mon Sep 17 00:00:00 2001 From: brentstone Date: Fri, 6 Oct 2023 12:35:17 -0400 Subject: [PATCH 072/161] add checks and simplify code --- proof_of_stake/src/lib.rs | 23 ++++++++--------------- shared/src/sdk/tx.rs | 12 ++++++++++++ 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 0fbbf2231b..9051aae725 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -2355,13 +2355,13 @@ pub fn change_validator_commission_rate( where S: StorageRead + StorageWrite, { - // if new_rate < Uint::zero() { - // return Err(CommissionRateChangeError::NegativeRate( - // new_rate, - // validator.clone(), - // ) - // .into()); - // } + if new_rate.is_negative() { + return Err(CommissionRateChangeError::NegativeRate( + new_rate, + validator.clone(), + ) + .into()); + } let max_change = read_validator_max_commission_rate_change(storage, validator)?; @@ -2386,14 +2386,7 @@ where .get(storage, pipeline_epoch.prev(), ¶ms)? 
.expect("Could not find a rate in given epoch"); - // TODO: change this back if we use `Dec` type with a signed integer - // let change_from_prev = new_rate - rate_before_pipeline; - // if change_from_prev.abs() > max_change.unwrap() { - let change_from_prev = if new_rate > rate_before_pipeline { - new_rate - rate_before_pipeline - } else { - rate_before_pipeline - new_rate - }; + let change_from_prev = new_rate.abs_diff(&rate_before_pipeline); if change_from_prev > max_change.unwrap() { return Err(CommissionRateChangeError::RateChangeTooLarge( change_from_prev, diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..c2edfc69ba 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -560,6 +560,18 @@ pub async fn build_validator_commission_change< commission_rate, max_commission_change_per_epoch, }) => { + if rate.is_negative() || rate > Dec::one() { + edisplay_line!( + IO, + "New rate is outside of the allowed range of values \ + between 0.0 and 1.0." + ); + if !tx_args.force { + return Err(Error::from( + TxError::InvalidCommissionRate(rate), + )); + } + } if rate.abs_diff(&commission_rate) > max_commission_change_per_epoch { From 2e84ddc803b3c2f632d711b6176e359bd4a92801 Mon Sep 17 00:00:00 2001 From: brentstone Date: Fri, 6 Oct 2023 12:41:54 -0400 Subject: [PATCH 073/161] changelog: add #1973 --- .changelog/unreleased/improvements/1973-refine-commission-tx.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1973-refine-commission-tx.md diff --git a/.changelog/unreleased/improvements/1973-refine-commission-tx.md b/.changelog/unreleased/improvements/1973-refine-commission-tx.md new file mode 100644 index 0000000000..04a00bac66 --- /dev/null +++ b/.changelog/unreleased/improvements/1973-refine-commission-tx.md @@ -0,0 +1,2 @@ +- Add missing checks for the commission rate change tx and code clean-up + ([\#1973](https://github.com/anoma/namada/pull/1973)) \ No newline at end of file From 
615ebc8e7dbf3531c446f60ec7ac77676aef3b63 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 9 Oct 2023 15:25:56 +0200 Subject: [PATCH 074/161] SDK can now query for the native token address from the network. Also added null IO implementation. --- apps/src/bin/namada-client/main.rs | 2 +- apps/src/bin/namada-relayer/main.rs | 3 +- apps/src/bin/namada-wallet/main.rs | 2 +- apps/src/lib/cli/api.rs | 4 +- apps/src/lib/cli/client.rs | 7 +- apps/src/lib/cli/context.rs | 8 +- apps/src/lib/cli/relayer.rs | 4 +- apps/src/lib/cli/wallet.rs | 2 +- apps/src/lib/client/tx.rs | 4 +- .../lib/node/ledger/shell/testing/client.rs | 6 +- benches/lib.rs | 3 +- shared/src/ledger/mod.rs | 126 ++++++++++-------- shared/src/ledger/queries/shell.rs | 12 ++ shared/src/sdk/args.rs | 4 +- shared/src/sdk/rpc.rs | 7 + shared/src/types/io.rs | 60 ++++++++- 16 files changed, 167 insertions(+), 87 deletions(-) diff --git a/apps/src/bin/namada-client/main.rs b/apps/src/bin/namada-client/main.rs index 167674f65e..770dcf5367 100644 --- a/apps/src/bin/namada-client/main.rs +++ b/apps/src/bin/namada-client/main.rs @@ -13,7 +13,7 @@ async fn main() -> Result<()> { let _log_guard = logging::init_from_env_or(LevelFilter::INFO)?; // run the CLI - CliApi::::handle_client_command::( + CliApi::handle_client_command::( None, cli::namada_client_cli()?, &CliIo, diff --git a/apps/src/bin/namada-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs index ef5e05f913..f9d98a2a4e 100644 --- a/apps/src/bin/namada-relayer/main.rs +++ b/apps/src/bin/namada-relayer/main.rs @@ -14,6 +14,5 @@ async fn main() -> Result<()> { let cmd = cli::namada_relayer_cli()?; // run the CLI - CliApi::::handle_relayer_command::(None, cmd, &CliIo) - .await + CliApi::handle_relayer_command::(None, cmd, &CliIo).await } diff --git a/apps/src/bin/namada-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs index 987e9d2699..30d4a64156 100644 --- a/apps/src/bin/namada-wallet/main.rs +++ b/apps/src/bin/namada-wallet/main.rs @@ -6,5 +6,5 @@ 
pub fn main() -> Result<()> { color_eyre::install()?; let (cmd, ctx) = cli::namada_wallet_cli()?; // run the CLI - CliApi::::handle_wallet_command(cmd, ctx, &CliIo) + CliApi::handle_wallet_command(cmd, ctx, &CliIo) } diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index 052a834f55..1b6851f3a9 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -1,5 +1,3 @@ -use std::marker::PhantomData; - use namada::sdk::queries::Client; use namada::sdk::rpc::wait_until_node_is_synched; use namada::tendermint_rpc::HttpClient; @@ -32,4 +30,4 @@ pub struct CliIo; #[async_trait::async_trait(?Send)] impl Io for CliIo {} -pub struct CliApi(PhantomData); +pub struct CliApi; diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index ac1ca1e34d..a342e9ef25 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -15,8 +15,8 @@ fn error() -> Report { eyre!("Fatal error") } -impl CliApi { - pub async fn handle_client_command( +impl CliApi { + pub async fn handle_client_command( client: Option, cmd: cli::NamadaClient, io: &IO, @@ -139,11 +139,12 @@ impl CliApi { .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - let namada = NamadaImpl::new( + let namada = NamadaImpl::native_new( &client, &mut ctx.wallet, &mut ctx.shielded, io, + ctx.native_token, ); tx::submit_init_validator( &namada, diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 4772ef98b9..f6c3399baf 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -161,7 +161,13 @@ impl Context { C: namada::ledger::queries::Client + Sync, IO: Io, { - NamadaImpl::new(client, &mut self.wallet, &mut self.shielded, io) + NamadaImpl::native_new( + client, + &mut self.wallet, + &mut self.shielded, + io, + self.native_token.clone(), + ) } /// Parse and/or look-up the value from the context. 
diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index d94fd5a09d..aadf2d3bda 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -15,11 +15,11 @@ fn error() -> Report { eyre!("Fatal error") } -impl CliApi { +impl CliApi { pub async fn handle_relayer_command( client: Option, cmd: cli::NamadaRelayer, - io: &IO, + io: &impl Io, ) -> Result<()> where C: CliClient, diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 5dc223cd64..6247145b84 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -25,7 +25,7 @@ use crate::cli::args::CliToSdk; use crate::cli::{args, cmds, Context}; use crate::wallet::{read_and_confirm_encryption_password, CliWalletUtils}; -impl CliApi { +impl CliApi { pub fn handle_wallet_command( cmd: cmds::NamadaWallet, mut ctx: Context, diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 22f0c1b1b4..53b2232f64 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -641,7 +641,7 @@ where })?; let author_balance = rpc::get_token_balance( namada.client(), - &namada.native_token().await, + &namada.native_token(), &proposal.proposal.author, ) .await; @@ -665,7 +665,7 @@ where })?; let author_balane = rpc::get_token_balance( namada.client(), - &namada.native_token().await, + &namada.native_token(), &proposal.proposal.author, ) .await; diff --git a/apps/src/lib/node/ledger/shell/testing/client.rs b/apps/src/lib/node/ledger/shell/testing/client.rs index 504bdc7e5b..7649156b8e 100644 --- a/apps/src/lib/node/ledger/shell/testing/client.rs +++ b/apps/src/lib/node/ledger/shell/testing/client.rs @@ -47,7 +47,7 @@ pub fn run( NamadaClient::WithoutContext(sub_cmd, global) } }; - rt.block_on(CliApi::::handle_client_command( + rt.block_on(CliApi::handle_client_command( Some(node), cmd, &TestingIo, @@ -61,7 +61,7 @@ pub fn run( let cmd = cmds::NamadaWallet::parse(&matches) .expect("Could not parse wallet command"); - 
CliApi::::handle_wallet_command(cmd, ctx, &TestingIo) + CliApi::handle_wallet_command(cmd, ctx, &TestingIo) } Bin::Relayer => { args.insert(0, "relayer"); @@ -83,7 +83,7 @@ pub fn run( NamadaRelayer::ValidatorSet(sub_cmd) } }; - rt.block_on(CliApi::::handle_relayer_command( + rt.block_on(CliApi::handle_relayer_command( Some(node), cmd, &TestingIo, diff --git a/benches/lib.rs b/benches/lib.rs index b5036d5f66..f0cba69475 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -810,11 +810,12 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let namada = NamadaImpl::new( + let namada = NamadaImpl::native_new( &self.shell, &mut self.wallet, &mut self.shielded, &StdIo, + self.shell.wl_storage.storage.native_token.clone(), ); let shielded = async_runtime .block_on( diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 5536f46b63..aecde2d930 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -28,6 +28,7 @@ use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::proto::Tx; use crate::sdk::args::{self, InputAmount, SdkTypes}; use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; +use crate::sdk::rpc::query_native_token; use crate::sdk::signing::{self, SigningTxData}; use crate::sdk::tx::{ self, ProcessTxResponse, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, @@ -86,10 +87,10 @@ pub trait Namada<'a>: Sized { ) -> RwLockWriteGuard<&'a mut ShieldedContext>; /// Return the native token - async fn native_token(&self) -> Address; + fn native_token(&self) -> Address; /// Make a tx builder using no arguments - async fn tx_builder(&self) -> args::Tx { + fn tx_builder(&self) -> args::Tx { args::Tx { dry_run: false, dry_run_wrapper: false, @@ -102,7 +103,7 @@ pub trait Namada<'a>: Sized { wallet_alias_force: false, fee_amount: None, wrapper_fee_payer: None, - fee_token: self.native_token().await, + fee_token: self.native_token(), fee_unshield: None, gas_limit: GasLimit::from(20_000), expiration: None, @@ -117,7 +118,7 @@ pub trait 
Namada<'a>: Sized { } /// Make a TxTransfer builder from the given minimum set of arguments - async fn new_transfer( + fn new_transfer( &self, source: TransferSource, target: TransferTarget, @@ -130,24 +131,21 @@ pub trait Namada<'a>: Sized { token, amount, tx_code_path: PathBuf::from(TX_TRANSFER_WASM), - tx: self.tx_builder().await, - native_token: self.native_token().await, + tx: self.tx_builder(), + native_token: self.native_token(), } } /// Make a RevealPK builder from the given minimum set of arguments - async fn new_reveal_pk( - &self, - public_key: common::PublicKey, - ) -> args::RevealPk { + fn new_reveal_pk(&self, public_key: common::PublicKey) -> args::RevealPk { args::RevealPk { public_key, - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a Bond builder from the given minimum set of arguments - async fn new_bond( + fn new_bond( &self, validator: Address, amount: token::Amount, @@ -156,14 +154,14 @@ pub trait Namada<'a>: Sized { validator, amount, source: None, - tx: self.tx_builder().await, - native_token: self.native_token().await, + tx: self.tx_builder(), + native_token: self.native_token(), tx_code_path: PathBuf::from(TX_BOND_WASM), } } /// Make a Unbond builder from the given minimum set of arguments - async fn new_unbond( + fn new_unbond( &self, validator: Address, amount: token::Amount, @@ -172,13 +170,13 @@ pub trait Namada<'a>: Sized { validator, amount, source: None, - tx: self.tx_builder().await, + tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_UNBOND_WASM), } } /// Make a TxIbcTransfer builder from the given minimum set of arguments - async fn new_ibc_transfer( + fn new_ibc_transfer( &self, source: Address, receiver: String, @@ -196,41 +194,38 @@ pub trait Namada<'a>: Sized { timeout_height: None, timeout_sec_offset: None, memo: None, - tx: self.tx_builder().await, + tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_IBC_WASM), } } /// Make a InitProposal builder from the given minimum set of arguments - async fn 
new_init_proposal( - &self, - proposal_data: Vec, - ) -> args::InitProposal { + fn new_init_proposal(&self, proposal_data: Vec) -> args::InitProposal { args::InitProposal { proposal_data, - native_token: self.native_token().await, + native_token: self.native_token(), is_offline: false, is_pgf_stewards: false, is_pgf_funding: false, tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a TxUpdateAccount builder from the given minimum set of arguments - async fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { + fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { args::TxUpdateAccount { addr, vp_code_path: None, public_keys: vec![], threshold: None, tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a VoteProposal builder from the given minimum set of arguments - async fn new_vote_prposal( + fn new_vote_prposal( &self, vote: String, voter: Address, @@ -242,13 +237,13 @@ pub trait Namada<'a>: Sized { is_offline: false, proposal_data: None, tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a CommissionRateChange builder from the given minimum set of /// arguments - async fn new_change_commission_rate( + fn new_change_commission_rate( &self, rate: Dec, validator: Address, @@ -257,12 +252,12 @@ pub trait Namada<'a>: Sized { rate, validator, tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a TxInitValidator builder from the given minimum set of arguments - async fn new_init_validator( + fn new_init_validator( &self, commission_rate: Dec, max_commission_rate_change: Dec, @@ -280,34 +275,34 @@ pub trait Namada<'a>: Sized { validator_vp_code_path: PathBuf::from(VP_USER_WASM), unsafe_dont_encrypt: false, tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), - tx: 
self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a TxUnjailValidator builder from the given minimum set of arguments - async fn new_unjail_validator( + fn new_unjail_validator( &self, validator: Address, ) -> args::TxUnjailValidator { args::TxUnjailValidator { validator, tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a Withdraw builder from the given minimum set of arguments - async fn new_withdraw(&self, validator: Address) -> args::Withdraw { + fn new_withdraw(&self, validator: Address) -> args::Withdraw { args::Withdraw { validator, source: None, tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a Withdraw builder from the given minimum set of arguments - async fn new_add_erc20_transfer( + fn new_add_erc20_transfer( &self, sender: Address, recipient: EthAddress, @@ -324,28 +319,25 @@ pub trait Namada<'a>: Sized { denom: NATIVE_MAX_DECIMAL_PLACES.into(), }), fee_payer: None, - fee_token: self.native_token().await, + fee_token: self.native_token(), nut: false, code_path: PathBuf::from(TX_BRIDGE_POOL_WASM), - tx: self.tx_builder().await, + tx: self.tx_builder(), } } /// Make a ResignSteward builder from the given minimum set of arguments - async fn new_resign_steward( - &self, - steward: Address, - ) -> args::ResignSteward { + fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { args::ResignSteward { steward, - tx: self.tx_builder().await, + tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), } } /// Make a UpdateStewardCommission builder from the given minimum set of /// arguments - async fn new_update_steward_rewards( + fn new_update_steward_rewards( &self, steward: Address, commission: Vec, @@ -353,16 +345,16 @@ pub trait Namada<'a>: Sized { args::UpdateStewardCommission { steward, commission, - tx: self.tx_builder().await, + tx: self.tx_builder(), tx_code_path: 
PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), } } /// Make a TxCustom builder from the given minimum set of arguments - async fn new_custom(&self, owner: Address) -> args::TxCustom { + fn new_custom(&self, owner: Address) -> args::TxCustom { args::TxCustom { owner, - tx: self.tx_builder().await, + tx: self.tx_builder(), code_path: None, data_path: None, serialized_tx: None, @@ -405,13 +397,12 @@ where pub shielded: RwLock<&'a mut ShieldedContext>, /// Captures the input/output streams used by this object pub io: &'a I, + /// The address of the native token + native_token: Address, /// The default builder for a Tx prototype: args::Tx, } -/// The Namada token -pub const NAM: &str = "atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5"; - impl<'a, C, U, V, I> NamadaImpl<'a, C, U, V, I> where C: crate::ledger::queries::Client + Sync, @@ -419,18 +410,20 @@ where V: ShieldedUtils, I: Io, { - /// Construct a new Namada context - pub fn new( + /// Construct a new Namada context with the given native token address + pub fn native_new( client: &'a C, wallet: &'a mut Wallet, shielded: &'a mut ShieldedContext, io: &'a I, + native_token: Address, ) -> Self { - Self { + NamadaImpl { client, wallet: RwLock::new(wallet), shielded: RwLock::new(shielded), io, + native_token: native_token.clone(), prototype: args::Tx { dry_run: false, dry_run_wrapper: false, @@ -443,7 +436,7 @@ where wallet_alias_force: false, fee_amount: None, wrapper_fee_payer: None, - fee_token: Address::from_str(NAM).unwrap(), + fee_token: native_token, fee_unshield: None, gas_limit: GasLimit::from(20_000), expiration: None, @@ -457,6 +450,23 @@ where }, } } + + /// Construct a new Namada context looking up the native token address + pub async fn new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + io: &'a I, + ) -> crate::sdk::error::Result> { + let native_token = query_native_token(client).await?; + Ok(NamadaImpl::native_new( + client, + wallet, 
+ shielded, + io, + native_token, + )) + } } #[async_trait::async_trait(?Send)] @@ -473,12 +483,12 @@ where type WalletUtils = U; /// Obtain the prototypical Tx builder - async fn tx_builder(&self) -> args::Tx { + fn tx_builder(&self) -> args::Tx { self.prototype.clone() } - async fn native_token(&self) -> Address { - Address::from_str(NAM).unwrap() + fn native_token(&self) -> Address { + self.native_token.clone() } fn io(&self) -> &'a Self::Io { diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index a766846916..a9f272839f 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -43,6 +43,9 @@ router! {SHELL, // Epoch of the last committed block ( "epoch" ) -> Epoch = epoch, + // The address of the native token + ( "native_token" ) -> Address = native_token, + // Epoch of the input block height ( "epoch_at_height" / [height: BlockHeight]) -> Option = epoch_at_height, @@ -288,6 +291,15 @@ where Ok(data) } +fn native_token(ctx: RequestCtx<'_, D, H>) -> storage_api::Result
+where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let data = ctx.wl_storage.storage.native_token.clone(); + Ok(data) +} + fn epoch_at_height( ctx: RequestCtx<'_, D, H>, height: BlockHeight, diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index 0853dc251b..d7556d6321 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -530,7 +530,7 @@ impl InitProposal { e.to_string(), ) })?; - let nam_address = context.native_token().await; + let nam_address = context.native_token(); let author_balance = rpc::get_token_balance( context.client(), &nam_address, @@ -558,7 +558,7 @@ impl InitProposal { e.to_string(), ) })?; - let nam_address = context.native_token().await; + let nam_address = context.native_token(); let author_balance = rpc::get_token_balance( context.client(), &nam_address, diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index eb8ebd8a11..e7da6dcbae 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -103,6 +103,13 @@ pub async fn query_epoch( convert_response::(RPC.shell().epoch(client).await) } +/// Query the address of the native token +pub async fn query_native_token( + client: &C, +) -> Result { + convert_response::(RPC.shell().native_token(client).await) +} + /// Query the epoch of the given block height, if it exists. /// Will return none if the input block height is greater than /// the latest committed block height. diff --git a/shared/src/types/io.rs b/shared/src/types/io.rs index f100ca8433..248f6f91d9 100644 --- a/shared/src/types/io.rs +++ b/shared/src/types/io.rs @@ -2,28 +2,26 @@ //! generic IO. The defaults are the obvious Rust native //! functions. -/// Rust native I/O handling. 
-pub struct StdIo; - +/// A trait that abstracts out I/O operations #[async_trait::async_trait(?Send)] -impl Io for StdIo {} - -#[async_trait::async_trait(?Send)] -#[allow(missing_docs)] pub trait Io { + /// Print the given string fn print(&self, output: impl AsRef) { print!("{}", output.as_ref()); } + /// Flush the output fn flush(&self) { use std::io::Write; std::io::stdout().flush().unwrap(); } + /// Print the given string with a newline fn println(&self, output: impl AsRef) { println!("{}", output.as_ref()); } + /// Print the given string into the given Writer fn write( &self, mut writer: W, @@ -32,6 +30,7 @@ pub trait Io { write!(writer, "{}", output.as_ref()) } + /// Print the given string into the given Writer and terminate with newline fn writeln( &self, mut writer: W, @@ -40,10 +39,12 @@ pub trait Io { writeln!(writer, "{}", output.as_ref()) } + /// Print the given error string fn eprintln(&self, output: impl AsRef) { eprintln!("{}", output.as_ref()); } + /// Read a string from input async fn read(&self) -> std::io::Result { #[cfg(not(target_family = "wasm"))] { @@ -55,6 +56,7 @@ pub trait Io { } } + /// Display the given prompt and return the string input async fn prompt(&self, question: impl AsRef) -> String { #[cfg(not(target_family = "wasm"))] { @@ -76,6 +78,50 @@ pub trait Io { } } +/// Rust native I/O handling. +pub struct StdIo; + +#[async_trait::async_trait(?Send)] +impl Io for StdIo {} + +/// Ignores all I/O operations. 
+pub struct NullIo; + +#[async_trait::async_trait(?Send)] +impl Io for NullIo { + fn print(&self, _output: impl AsRef) {} + + fn flush(&self) {} + + fn println(&self, _output: impl AsRef) {} + + fn write( + &self, + mut _writer: W, + _output: impl AsRef, + ) -> std::io::Result<()> { + Ok(()) + } + + fn writeln( + &self, + mut _writer: W, + _output: impl AsRef, + ) -> std::io::Result<()> { + Ok(()) + } + + fn eprintln(&self, _output: impl AsRef) {} + + async fn read(&self) -> std::io::Result { + panic!("Unsupported operation") + } + + async fn prompt(&self, _question: impl AsRef) -> String { + panic!("Unsupported operation") + } +} + /// A generic function for displaying a prompt to users and reading /// in their response. #[cfg(not(target_family = "wasm"))] From 9aa1d4518170396c729f8d3ec198aff83d2d0ef7 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 3 Oct 2023 10:03:57 +0100 Subject: [PATCH 075/161] Fix transmitting protocol txs if validator node --- apps/src/lib/node/ledger/shell/mod.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..5adbf14901 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -825,7 +825,15 @@ where ); response.data = root.0.to_vec(); - // validator specific actions + self.bump_last_processed_eth_block(); + self.broadcast_queued_txs(); + + response + } + + /// Updates the Ethereum oracle's last processed block. + #[inline] + fn bump_last_processed_eth_block(&mut self) { if let ShellMode::Validator { eth_oracle: Some(eth_oracle), .. @@ -851,20 +859,17 @@ where blocks" ), } - - // broadcast any queued txs - self.broadcast_queued_txs(); } - - response } /// Empties all the ledger's queues of transactions to be broadcasted /// via CometBFT's P2P network. 
#[inline] fn broadcast_queued_txs(&mut self) { - self.broadcast_protocol_txs(); - self.broadcast_expired_txs(); + if let ShellMode::Validator { .. } = &self.mode { + self.broadcast_protocol_txs(); + self.broadcast_expired_txs(); + } } /// Broadcast any pending protocol transactions. From 25ffc677c34a8568064a1108fed7fbec77e5aa92 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 3 Oct 2023 15:08:53 +0100 Subject: [PATCH 076/161] Allow shutting off the Ethereum oracle in tests --- apps/src/lib/node/ledger/shell/finalize_block.rs | 3 +++ apps/src/lib/node/ledger/shell/mod.rs | 14 ++++++++++++-- apps/src/lib/node/ledger/shell/prepare_proposal.rs | 1 + 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..b7063d2631 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1982,6 +1982,7 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators: 4, + ..Default::default() }); let mut validator_set: BTreeSet = @@ -2651,6 +2652,7 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators, + ..Default::default() }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); params.unbonding_len = 4; @@ -3029,6 +3031,7 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators, + ..Default::default() }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); params.unbonding_len = 4; diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 5adbf14901..f162b52490 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1872,6 +1872,8 @@ mod test_utils { /// The number of validators to configure // in `InitChain`. 
pub num_validators: u64, + /// Whether to enable the Ethereum oracle or not. + pub enable_ethereum_oracle: bool, } impl Default for SetupCfg { @@ -1879,6 +1881,7 @@ mod test_utils { Self { last_height: H::default(), num_validators: 1, + enable_ethereum_oracle: true, } } } @@ -1890,6 +1893,7 @@ mod test_utils { SetupCfg { last_height, num_validators, + enable_ethereum_oracle, }: SetupCfg, ) -> ( TestShell, @@ -1897,8 +1901,14 @@ mod test_utils { Sender, Receiver, ) { - let (mut test, receiver, eth_receiver, control_receiver) = + let (mut test, receiver, eth_sender, control_receiver) = TestShell::new_at_height(last_height); + if !enable_ethereum_oracle { + if let ShellMode::Validator { eth_oracle, .. } = &mut test.mode { + // drop the eth oracle event receiver + _ = eth_oracle.take(); + } + } test.init_chain( RequestInitChain { time: Some(Timestamp { @@ -1911,7 +1921,7 @@ mod test_utils { num_validators, ); test.wl_storage.commit_block().expect("Test failed"); - (test, receiver, eth_receiver, control_receiver) + (test, receiver, eth_sender, control_receiver) } /// Same as [`setup_at_height`], but returns a shell at the given block diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..198ddb8b30 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -879,6 +879,7 @@ mod test_prepare_proposal { test_utils::setup_with_cfg(test_utils::SetupCfg { last_height: FIRST_HEIGHT, num_validators: 2, + ..Default::default() }); let params = shell.wl_storage.pos_queries().get_pos_params(); From 9d5b454fac329875f234d708de0c092bbd01e095 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 3 Oct 2023 15:09:14 +0100 Subject: [PATCH 077/161] Add test_broadcast_valset_upd_inspite_oracle_off() unit test --- apps/src/lib/node/ledger/shell/mod.rs | 60 ++++++++++++++++++++------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git 
a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index f162b52490..e5b10a9642 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -2141,23 +2141,64 @@ mod test_utils { } } -#[cfg(all(test, not(feature = "abcipp")))] -mod abciplus_mempool_tests { +#[cfg(test)] +mod shell_tests { use namada::proto::{ - Data, Section, SignableEthMessage, Signature, Signed, Tx, + Code, Data, Section, SignableEthMessage, Signature, Signed, Tx, }; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::RefTo; - use namada::types::storage::BlockHeight; + use namada::types::storage::{BlockHeight, Epoch}; use namada::types::transaction::protocol::{ ethereum_tx_data_variants, ProtocolTx, ProtocolTxType, }; + use namada::types::transaction::{Fee, WrapperTx}; use namada::types::vote_extensions::{bridge_pool_roots, ethereum_events}; use super::*; use crate::node::ledger::shell::test_utils; use crate::wallet; + const GAS_LIMIT_MULTIPLIER: u64 = 100_000; + + /// Check that the shell broadcasts validator set updates, + /// even when the Ethereum oracle is not running (e.g. + /// because the bridge is disabled). 
+ #[tokio::test] + async fn test_broadcast_valset_upd_inspite_oracle_off() { + // this height should result in a validator set + // update being broadcasted + let (mut shell, mut broadcaster_rx, _, _) = + test_utils::setup_with_cfg(test_utils::SetupCfg { + last_height: 1, + enable_ethereum_oracle: false, + ..Default::default() + }); + + // broadcast validator set update + shell.broadcast_protocol_txs(); + + // check data inside tx - it should be a validator set update + // signed at epoch 0 + let signed_valset_upd = loop { + // attempt to receive validator set update + let serialized_tx = tokio::time::timeout( + std::time::Duration::from_secs(1), + async { broadcaster_rx.recv().await.unwrap() }, + ) + .await + .unwrap(); + let tx = Tx::try_from(&serialized_tx[..]).unwrap(); + + match ethereum_tx_data_variants::ValSetUpdateVext::try_from(&tx) { + Ok(signed_valset_upd) => break signed_valset_upd, + Err(_) => continue, + } + }; + + assert_eq!(signed_valset_upd.data.signing_epoch, Epoch(0)); + } + /// Check that broadcasting expired Ethereum events works /// as expected. 
#[test] @@ -2334,17 +2375,6 @@ mod abciplus_mempool_tests { let rsp = shell.mempool_validate(&tx, Default::default()); assert_eq!(rsp.code, u32::from(ErrorCodes::InvalidVoteExtension)); } -} - -#[cfg(test)] -mod tests { - use namada::proof_of_stake::Epoch; - use namada::proto::{Code, Data, Section, Signature, Tx}; - use namada::types::transaction::{Fee, WrapperTx}; - - use super::*; - - const GAS_LIMIT_MULTIPLIER: u64 = 100_000; /// Mempool validation must reject unsigned wrappers #[test] From 2368969e9f0b300dd71aebd645c30aa9b9ed3a6b Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 3 Oct 2023 10:13:39 +0100 Subject: [PATCH 078/161] Changelog for #1964 --- .changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md diff --git a/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md b/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md new file mode 100644 index 0000000000..6c39e70d2e --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md @@ -0,0 +1,2 @@ +- Fix broadcasting logic for protocol txs when a node operating the network is a + validator ([\#1964](https://github.com/anoma/namada/pull/1964)) \ No newline at end of file From af011f6fb2f44452a7beccb97047e6f02d253798 Mon Sep 17 00:00:00 2001 From: yito88 Date: Mon, 9 Oct 2023 17:40:52 +0200 Subject: [PATCH 079/161] fix error handling --- shared/src/sdk/tx.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 16aa79170a..2e506d6a63 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ -2029,8 +2029,11 @@ pub async fn gen_ibc_shielded_transfer< .await?; let ibc_denom = rpc::query_ibc_denom::<_, IO>(client, &args.token, Some(&source)).await; + let prefixed_denom = ibc_denom + .parse() + .map_err(|_| Error::Other(format!("Invalid IBC denom: {ibc_denom}")))?; let token = 
namada_core::ledger::ibc::received_ibc_token( - &ibc_denom.parse().expect("Invalid IBC denom"), + &prefixed_denom, &src_port_id, &src_channel_id, &args.port_id, @@ -2040,9 +2043,7 @@ pub async fn gen_ibc_shielded_transfer< Error::Other(format!("Getting IBC Token failed: error {e}")) })?; let validated_amount = - validate_amount::<_, IO>(client, args.amount, &token, false) - .await - .expect("expected to validate amount"); + validate_amount::<_, IO>(client, args.amount, &token, false).await?; let shielded_transfer = shielded .gen_shielded_transfer::<_, IO>( From 66a756761a8144c3b88e0d75d6378bf1c2c74c30 Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 11 Oct 2023 11:18:34 -0400 Subject: [PATCH 080/161] check if rate > 1 in lib code --- proof_of_stake/src/lib.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 9051aae725..2c3bf37f6b 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -176,6 +176,10 @@ pub enum SlashError { pub enum CommissionRateChangeError { #[error("Unexpected negative commission rate {0} for validator {1}")] NegativeRate(Dec, Address), + #[error( + "Unexpected commission rate {0} larger than 1.0 for validator {1}" + )] + LargerThanOne(Dec, Address), #[error("Rate change of {0} is too large for validator {1}")] RateChangeTooLarge(Dec, Address), #[error( @@ -2363,6 +2367,14 @@ where .into()); } + if new_rate > Dec::one() { + return Err(CommissionRateChangeError::LargerThanOne( + new_rate, + validator.clone(), + ) + .into()); + } + let max_change = read_validator_max_commission_rate_change(storage, validator)?; if max_change.is_none() { From 78ccf429914bbd77aa5411cb78697a3df6936c1a Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Oct 2023 18:14:37 +0200 Subject: [PATCH 081/161] Writes the result of a governance proposal in storage --- apps/src/lib/node/ledger/shell/governance.rs | 4 ++++ core/src/ledger/governance/storage/keys.rs | 10 
++++++++++ core/src/ledger/governance/utils.rs | 2 ++ 3 files changed, 16 insertions(+) diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index cc79c9a9f0..712a1a6944 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -74,6 +74,10 @@ where )?; let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); + let proposal_result_key = gov_storage::get_proposal_result_key(id); + shell + .wl_storage + .write(&proposal_result_key, proposal_result)?; let transfer_address = match proposal_result.result { TallyResult::Passed => { diff --git a/core/src/ledger/governance/storage/keys.rs b/core/src/ledger/governance/storage/keys.rs index a975b6541f..92beb9da36 100644 --- a/core/src/ledger/governance/storage/keys.rs +++ b/core/src/ledger/governance/storage/keys.rs @@ -26,6 +26,7 @@ struct Keys { min_grace_epoch: &'static str, counter: &'static str, pending: &'static str, + result: &'static str, } /// Check if key is inside governance address space @@ -459,6 +460,15 @@ pub fn get_proposal_execution_key(id: u64) -> Key { .expect("Cannot obtain a storage key") } +/// Get the proposal result key +pub fn get_proposal_result_key(id: u64) -> Key { + proposal_prefix() + .push(&id.to_string()) + .expect("Cannot obtain a storage key") + .push(&Keys::VALUES.result.to_owned()) + .expect("Cannot obtain a storage key") +} + /// Get proposal id from key pub fn get_proposal_id(key: &Key) -> Option { match key.get_at(2) { diff --git a/core/src/ledger/governance/utils.rs b/core/src/ledger/governance/utils.rs index 1a4bf0fc7c..33f032def1 100644 --- a/core/src/ledger/governance/utils.rs +++ b/core/src/ledger/governance/utils.rs @@ -75,6 +75,7 @@ impl TallyType { } /// The result of a proposal +#[derive(Copy, Clone, BorshSerialize, BorshDeserialize)] pub enum TallyResult { /// Proposal was accepted with the associated value Passed, @@ -126,6 +127,7 @@ impl 
TallyResult { } /// The result with votes of a proposal +#[derive(Clone, Copy, BorshDeserialize, BorshSerialize)] pub struct ProposalResult { /// The result of a proposal pub result: TallyResult, From 464d93a3d907f0c1cddd10d9616ab3e6c5eda0af Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Oct 2023 19:02:32 +0200 Subject: [PATCH 082/161] Client first looks for governance proposal result in storage --- apps/src/lib/client/rpc.rs | 46 +++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..22fb627860 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -1045,23 +1045,39 @@ pub async fn query_proposal_result< return; }; - let is_author_steward = query_pgf_stewards(client) - .await - .iter() - .any(|steward| steward.address.eq(&proposal.author)); - let tally_type = proposal.get_tally_type(is_author_steward); - let total_voting_power = - get_total_staked_tokens(client, proposal.voting_end_epoch).await; + let proposal_result_key = + governance_storage::get_proposal_result_key(proposal_id); + let proposal_result = + // Try to directly query the result in storage first + match query_storage_value(client, &proposal_result_key).await { + Ok(result) => result, + Err(_) => { + // If failure, run the tally + let is_author_steward = query_pgf_stewards(client) + .await + .iter() + .any(|steward| steward.address.eq(&proposal.author)); + let tally_type = proposal.get_tally_type(is_author_steward); + let total_voting_power = get_total_staked_tokens( + client, + proposal.voting_end_epoch, + ) + .await; - let votes = compute_proposal_votes( - client, - proposal_id, - proposal.voting_end_epoch, - ) - .await; + let votes = compute_proposal_votes( + client, + proposal_id, + proposal.voting_end_epoch, + ) + .await; - let proposal_result = - compute_proposal_result(votes, total_voting_power, tally_type); + compute_proposal_result( + 
votes, + total_voting_power, + tally_type, + ) + } + }; display_line!(IO, "Proposal Id: {} ", proposal_id); display_line!(IO, "{:4}{}", "", proposal_result); From 79c85cbcaf8a22299a54b26f32aa33c05d2366bf Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Oct 2023 19:32:35 +0200 Subject: [PATCH 083/161] Changelog #1979 --- .../unreleased/improvements/1979-proposal-result-in-storage.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1979-proposal-result-in-storage.md diff --git a/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md b/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md new file mode 100644 index 0000000000..1eb15aa889 --- /dev/null +++ b/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md @@ -0,0 +1,2 @@ +- Persist the results of governance proposals in storage to allow recovering old + results. ([\#1979](https://github.com/anoma/namada/pull/1979)) \ No newline at end of file From 17c3e6cdb767ed907e8519b75ab6eff536e86288 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 11 Oct 2023 10:05:02 +0200 Subject: [PATCH 084/161] Separated an SDK crate out of the shared crate. 
--- Cargo.lock | 51 ++ Cargo.toml | 1 + apps/Cargo.toml | 3 +- apps/src/lib/cli.rs | 2 +- apps/src/lib/cli/api.rs | 4 +- apps/src/lib/cli/client.rs | 5 +- apps/src/lib/cli/context.rs | 8 +- apps/src/lib/cli/relayer.rs | 2 +- apps/src/lib/cli/wallet.rs | 12 +- apps/src/lib/client/rpc.rs | 66 +- apps/src/lib/client/tx.rs | 7 +- apps/src/lib/client/utils.rs | 2 +- apps/src/lib/config/genesis.rs | 8 +- .../lib/node/ledger/ethereum_oracle/mod.rs | 2 +- .../lib/node/ledger/shell/finalize_block.rs | 2 +- apps/src/lib/node/ledger/shell/init_chain.rs | 2 +- apps/src/lib/node/ledger/shell/mod.rs | 2 +- .../lib/node/ledger/shell/process_proposal.rs | 2 +- apps/src/lib/node/ledger/shell/queries.rs | 9 +- .../src/lib/node/ledger/shell/testing/node.rs | 10 +- .../lib/node/ledger/shell/vote_extensions.rs | 2 +- .../shell/vote_extensions/bridge_pool_vext.rs | 4 +- .../shell/vote_extensions/eth_events.rs | 2 +- .../shell/vote_extensions/val_set_update.rs | 2 +- apps/src/lib/wallet/cli_utils.rs | 4 +- apps/src/lib/wallet/defaults.rs | 8 +- apps/src/lib/wallet/mod.rs | 16 +- apps/src/lib/wallet/pre_genesis.rs | 6 +- apps/src/lib/wallet/store.rs | 10 +- benches/Cargo.toml | 1 + benches/lib.rs | 23 +- sdk/Cargo.toml | 121 +++ {shared/src/sdk => sdk/src}/args.rs | 133 +-- .../src/control_flow/mod.rs | 0 .../types => sdk/src}/control_flow/time.rs | 0 {shared/src/sdk => sdk/src}/error.rs | 6 +- .../src}/eth_bridge/bridge_pool.rs | 55 +- .../src/eth_bridge/mod.rs | 9 +- .../src}/eth_bridge/validator_set.rs | 18 +- {shared/src/ledger => sdk/src}/events/log.rs | 7 +- .../src}/events/log/dumb_queries.rs | 9 +- .../ledger/events.rs => sdk/src/events/mod.rs | 20 +- {shared/src/types => sdk/src}/io.rs | 0 sdk/src/lib.rs | 560 ++++++++++++ {shared/src/sdk => sdk/src}/masp.rs | 38 +- .../sdk/queries.rs => sdk/src/queries/mod.rs | 205 ++++- .../src/ledger => sdk/src}/queries/router.rs | 101 +-- .../src/ledger => sdk/src}/queries/shell.rs | 315 ++----- .../src}/queries/shell/eth_bridge.rs | 88 +- 
.../src/ledger => sdk/src}/queries/types.rs | 29 +- .../src}/queries/vp/governance.rs | 20 +- .../src/ledger => sdk/src}/queries/vp/mod.rs | 0 .../src/ledger => sdk/src}/queries/vp/pgf.rs | 24 +- .../src/ledger => sdk/src}/queries/vp/pos.rs | 107 +-- .../ledger => sdk/src}/queries/vp/token.rs | 13 +- {shared/src/sdk => sdk/src}/rpc.rs | 131 ++- {shared/src/sdk => sdk/src}/signing.rs | 43 +- {shared/src/sdk => sdk/src}/tx.rs | 86 +- {shared/src/sdk => sdk/src}/wallet/alias.rs | 0 .../sdk => sdk/src}/wallet/derivation_path.rs | 6 +- {shared/src/sdk => sdk/src}/wallet/keys.rs | 2 +- {shared/src/sdk => sdk/src}/wallet/mod.rs | 10 +- .../src/sdk => sdk/src}/wallet/pre_genesis.rs | 6 +- {shared/src/sdk => sdk/src}/wallet/store.rs | 14 +- shared/Cargo.toml | 16 +- shared/src/ledger/governance/utils.rs | 11 + shared/src/ledger/mod.rs | 849 +++++++----------- shared/src/ledger/queries/mod.rs | 222 ----- shared/src/lib.rs | 2 +- shared/src/sdk/mod.rs | 12 - shared/src/types/mod.rs | 4 +- shared/src/vm/host_env.rs | 2 +- tests/Cargo.toml | 1 + tests/src/e2e/ledger_tests.rs | 2 +- tests/src/integration/masp.rs | 2 +- tests/src/native_vp/eth_bridge_pool.rs | 8 +- wasm/Cargo.lock | 45 + wasm_for_tests/wasm_source/Cargo.lock | 45 + 78 files changed, 2003 insertions(+), 1672 deletions(-) create mode 100644 sdk/Cargo.toml rename {shared/src/sdk => sdk/src}/args.rs (95%) rename shared/src/types/control_flow.rs => sdk/src/control_flow/mod.rs (100%) rename {shared/src/types => sdk/src}/control_flow/time.rs (100%) rename {shared/src/sdk => sdk/src}/error.rs (97%) rename {shared/src/ledger => sdk/src}/eth_bridge/bridge_pool.rs (97%) rename shared/src/ledger/eth_bridge.rs => sdk/src/eth_bridge/mod.rs (95%) rename {shared/src/ledger => sdk/src}/eth_bridge/validator_set.rs (98%) rename {shared/src/ledger => sdk/src}/events/log.rs (97%) rename {shared/src/ledger => sdk/src}/events/log/dumb_queries.rs (96%) rename shared/src/ledger/events.rs => sdk/src/events/mod.rs (94%) rename 
{shared/src/types => sdk/src}/io.rs (100%) create mode 100644 sdk/src/lib.rs rename {shared/src/sdk => sdk/src}/masp.rs (99%) rename shared/src/sdk/queries.rs => sdk/src/queries/mod.rs (59%) rename {shared/src/ledger => sdk/src}/queries/router.rs (92%) rename {shared/src/ledger => sdk/src}/queries/shell.rs (58%) rename {shared/src/ledger => sdk/src}/queries/shell/eth_bridge.rs (96%) rename {shared/src/ledger => sdk/src}/queries/types.rs (88%) rename {shared/src/ledger => sdk/src}/queries/vp/governance.rs (75%) rename {shared/src/ledger => sdk/src}/queries/vp/mod.rs (100%) rename {shared/src/ledger => sdk/src}/queries/vp/pgf.rs (76%) rename {shared/src/ledger => sdk/src}/queries/vp/pos.rs (90%) rename {shared/src/ledger => sdk/src}/queries/vp/token.rs (87%) rename {shared/src/sdk => sdk/src}/rpc.rs (89%) rename {shared/src/sdk => sdk/src}/signing.rs (98%) rename {shared/src/sdk => sdk/src}/tx.rs (96%) rename {shared/src/sdk => sdk/src}/wallet/alias.rs (100%) rename {shared/src/sdk => sdk/src}/wallet/derivation_path.rs (98%) rename {shared/src/sdk => sdk/src}/wallet/keys.rs (99%) rename {shared/src/sdk => sdk/src}/wallet/mod.rs (99%) rename {shared/src/sdk => sdk/src}/wallet/pre_genesis.rs (96%) rename {shared/src/sdk => sdk/src}/wallet/store.rs (99%) delete mode 100644 shared/src/ledger/queries/mod.rs delete mode 100644 shared/src/sdk/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d8303e37f0..63ae872e56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4036,6 +4036,7 @@ dependencies = [ "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "namada_test_utils", "num256", "orion", @@ -4117,6 +4118,7 @@ dependencies = [ "masp_primitives", "masp_proofs", "namada", + "namada_sdk", "namada_test_utils", "num-derive", "num-rational 0.4.1", @@ -4176,6 +4178,7 @@ dependencies = [ "masp_proofs", "namada", "namada_apps", + "namada_sdk", "namada_test_utils", "prost", "rand 0.8.5", @@ -4306,6 +4309,53 @@ dependencies = [ "tracing-subscriber 0.3.17", ] 
+[[package]] +name = "namada_sdk" +version = "0.23.0" +dependencies = [ + "assert_matches", + "async-trait", + "bimap", + "borsh 0.9.4", + "circular-queue", + "data-encoding", + "derivation-path", + "ethbridge-bridge-contract", + "ethers", + "fd-lock", + "futures", + "itertools", + "masp_primitives", + "masp_proofs", + "namada_core", + "namada_ethereum_bridge", + "namada_proof_of_stake", + "namada_test_utils", + "num256", + "orion", + "owo-colors 3.5.0", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde 1.0.163", + "serde_json", + "sha2 0.9.9", + "slip10_ed25519", + "tempfile", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.9", + "tracing 0.1.37", + "wasmtimer", + "zeroize", +] + [[package]] name = "namada_test_utils" version = "0.23.0" @@ -4339,6 +4389,7 @@ dependencies = [ "namada", "namada_apps", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", diff --git a/Cargo.toml b/Cargo.toml index 9c731f0fdf..813cf99d9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "macros", "vp_prelude", "encoding_spec", + "sdk", ] # wasm packages have to be built separately diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 1d33f55df7..02bcf60373 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -55,7 +55,7 @@ mainnet = [ "namada/mainnet", ] dev = ["namada/dev"] -std = ["ed25519-consensus/std", "rand/std", "rand_core/std", "namada/std"] +std = ["ed25519-consensus/std", "rand/std", "rand_core/std", "namada/std", "namada_sdk/std"] # for integration tests and test utilies testing = ["dev"] @@ -67,6 +67,7 @@ abciplus = [ [dependencies] namada = {path = "../shared", features = ["ferveo-tpke", "masp-tx-gen", "multicore", "http-client"]} +namada_sdk = {path = "../sdk", default-features = false, features = ["wasm-runtime", "masp-tx-gen"]} ark-serialize.workspace = true ark-std.workspace = true arse-merkle-tree = { workspace = 
true, features = ["blake2b"] } diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index ce5bf600b5..421ada0e69 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2517,7 +2517,6 @@ pub mod args { use std::str::FromStr; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; - pub use namada::sdk::args::*; use namada::types::address::Address; use namada::types::chain::{ChainId, ChainIdPrefix}; use namada::types::dec::Dec; @@ -2530,6 +2529,7 @@ pub mod args { use namada::types::token; use namada::types::token::NATIVE_MAX_DECIMAL_PLACES; use namada::types::transaction::GasLimit; + pub use namada_sdk::args::*; use super::context::*; use super::utils::*; diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index 1b6851f3a9..79c8be3fa9 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -1,8 +1,8 @@ -use namada::sdk::queries::Client; -use namada::sdk::rpc::wait_until_node_is_synched; use namada::tendermint_rpc::HttpClient; use namada::types::control_flow::Halt; use namada::types::io::Io; +use namada_sdk::queries::Client; +use namada_sdk::rpc::wait_until_node_is_synched; use tendermint_config::net::Address as TendermintAddress; use crate::client::utils; diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index a342e9ef25..977442b9cb 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,9 +1,8 @@ use color_eyre::eyre::{eyre, Report, Result}; -use namada::ledger::{Namada, NamadaImpl}; -use namada::sdk::signing; -use namada::sdk::tx::dump_tx; use namada::types::control_flow::ProceedOrElse; use namada::types::io::Io; +use namada_sdk::tx::dump_tx; +use namada_sdk::{signing, Namada, NamadaImpl}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index f6c3399baf..a65d5c1830 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -6,16 +6,16 @@ use std::path::{Path, PathBuf}; use 
std::str::FromStr; use color_eyre::eyre::Result; -use namada::ledger::{Namada, NamadaImpl}; -use namada::sdk::masp::fs::FsShieldedUtils; -use namada::sdk::masp::ShieldedContext; -use namada::sdk::wallet::Wallet; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::*; +use namada_sdk::masp::fs::FsShieldedUtils; +use namada_sdk::masp::ShieldedContext; +use namada_sdk::wallet::Wallet; +use namada_sdk::{Namada, NamadaImpl}; use super::args; #[cfg(any(test, feature = "dev"))] diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index aadf2d3bda..497c69c819 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use color_eyre::eyre::{eyre, Report, Result}; use namada::eth_bridge::ethers::providers::{Http, Provider}; -use namada::ledger::eth_bridge::{bridge_pool, validator_set}; use namada::types::control_flow::ProceedOrElse; use namada::types::io::Io; +use namada_sdk::eth_bridge::{bridge_pool, validator_set}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 6247145b84..247835f46b 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -7,15 +7,15 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{ - DecryptionError, FindKeyError, GenRestoreKeyError, Wallet, WalletIo, - WalletStorage, -}; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; -use namada::{display, display_line, edisplay_line}; +use namada_sdk::masp::find_valid_diversifier; +use namada_sdk::wallet::{ + DecryptionError, FindKeyError, GenRestoreKeyError, Wallet, 
WalletIo, + WalletStorage, +}; +use namada_sdk::{display, display_line, edisplay_line}; use rand::RngCore; use rand_core::OsRng; diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 24b7708f42..d5dc2f23a3 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -33,16 +33,7 @@ use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{CommissionPair, PosParams, Slash}; use namada::ledger::queries::RPC; use namada::ledger::storage::ConversionState; -use namada::ledger::Namada; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; -use namada::sdk::error; -use namada::sdk::error::{is_pinned_error, Error, PinnedBalanceError}; -use namada::sdk::masp::{Conversions, MaspAmount, MaspChange}; -use namada::sdk::rpc::{ - self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, - TxResponse, -}; -use namada::sdk::wallet::AddressVpType; use namada::types::address::{masp, Address}; use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; @@ -52,7 +43,14 @@ use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use namada::types::storage::{BlockHeight, BlockResults, Epoch, Key, KeySeg}; use namada::types::token::{Change, MaspDenom}; use namada::types::{storage, token}; -use namada::{display, display_line, edisplay_line, prompt}; +use namada_sdk::error::{is_pinned_error, Error, PinnedBalanceError}; +use namada_sdk::masp::{Conversions, MaspAmount, MaspChange}; +use namada_sdk::rpc::{ + self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, + TxResponse, +}; +use namada_sdk::wallet::AddressVpType; +use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; use tokio::time::Instant; use crate::cli::{self, args}; @@ -65,7 +63,7 @@ use crate::facade::tendermint_rpc::error::Error as TError; /// error. 
pub async fn query_tx_status<'a>( namada: &impl Namada<'a>, - status: namada::sdk::rpc::TxEventQuery<'_>, + status: namada_sdk::rpc::TxEventQuery<'_>, deadline: Instant, ) -> Event { rpc::query_tx_status(namada, status, deadline) @@ -82,7 +80,7 @@ pub async fn query_and_print_epoch<'a>(context: &impl Namada<'a>) -> Epoch { /// Query the last committed block pub async fn query_block<'a>(context: &impl Namada<'a>) { - let block = namada::sdk::rpc::query_block(context.client()) + let block = namada_sdk::rpc::query_block(context.client()) .await .unwrap(); match block { @@ -677,7 +675,7 @@ pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, error::Error> { - namada::sdk::rpc::query_proposal_by_id(client, proposal_id).await + namada_sdk::rpc::query_proposal_by_id(client, proposal_id).await } /// Query token shielded balance(s) @@ -1003,7 +1001,7 @@ pub async fn get_token_balance( token: &Address, owner: &Address, ) -> token::Amount { - namada::sdk::rpc::get_token_balance(client, token, owner) + namada_sdk::rpc::get_token_balance(client, token, owner) .await .unwrap() } @@ -2083,7 +2081,7 @@ pub async fn is_validator( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::is_validator(client, address) + namada_sdk::rpc::is_validator(client, address) .await .unwrap() } @@ -2093,7 +2091,7 @@ pub async fn is_delegator( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::is_delegator(client, address) + namada_sdk::rpc::is_delegator(client, address) .await .unwrap() } @@ -2103,7 +2101,7 @@ pub async fn is_delegator_at( address: &Address, epoch: Epoch, ) -> bool { - namada::sdk::rpc::is_delegator_at(client, address, epoch) + namada_sdk::rpc::is_delegator_at(client, address, epoch) .await .unwrap() } @@ -2115,7 +2113,7 @@ pub async fn known_address( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::known_address(client, address) + namada_sdk::rpc::known_address(client, address) .await .unwrap() } @@ -2202,7 +2200,7 @@ 
pub async fn query_conversion( masp_primitives::transaction::components::I32Sum, MerklePath, )> { - namada::sdk::rpc::query_conversion(client, asset_type).await + namada_sdk::rpc::query_conversion(client, asset_type).await } /// Query a wasm code hash @@ -2221,7 +2219,7 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - namada::sdk::rpc::query_storage_value(client, key).await + namada_sdk::rpc::query_storage_value(client, key).await } /// Query a storage value and the proof without decoding. @@ -2233,7 +2231,7 @@ pub async fn query_storage_value_bytes< height: Option, prove: bool, ) -> (Option>, Option) { - namada::sdk::rpc::query_storage_value_bytes(client, key, height, prove) + namada_sdk::rpc::query_storage_value_bytes(client, key, height, prove) .await .unwrap() } @@ -2258,7 +2256,7 @@ pub async fn query_has_storage_key< client: &C, key: &storage::Key, ) -> bool { - namada::sdk::rpc::query_has_storage_key(client, key) + namada_sdk::rpc::query_has_storage_key(client, key) .await .unwrap() } @@ -2267,21 +2265,21 @@ pub async fn query_has_storage_key< /// the current status of a transation. 
pub async fn query_tx_events( client: &C, - tx_event_query: namada::sdk::rpc::TxEventQuery<'_>, + tx_event_query: namada_sdk::rpc::TxEventQuery<'_>, ) -> std::result::Result< Option, ::Error, > { - namada::sdk::rpc::query_tx_events(client, tx_event_query).await + namada_sdk::rpc::query_tx_events(client, tx_event_query).await } /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` pub async fn query_tx_response( client: &C, - tx_query: namada::sdk::rpc::TxEventQuery<'_>, + tx_query: namada_sdk::rpc::TxEventQuery<'_>, ) -> Result { - namada::sdk::rpc::query_tx_response(client, tx_query).await + namada_sdk::rpc::query_tx_response(client, tx_query).await } /// Lookup the results of applying the specified transaction to the @@ -2293,7 +2291,7 @@ pub async fn query_result<'a>( // First try looking up application event pertaining to given hash. let tx_response = query_tx_response( context.client(), - namada::sdk::rpc::TxEventQuery::Applied(&args.tx_hash), + namada_sdk::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; match tx_response { @@ -2308,7 +2306,7 @@ pub async fn query_result<'a>( // If this fails then instead look for an acceptance event. let tx_response = query_tx_response( context.client(), - namada::sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), + namada_sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; match tx_response { @@ -2359,7 +2357,7 @@ pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> HashSet
{ - namada::sdk::rpc::get_all_validators(client, epoch) + namada_sdk::rpc::get_all_validators(client, epoch) .await .unwrap() } @@ -2370,7 +2368,7 @@ pub async fn get_total_staked_tokens< client: &C, epoch: Epoch, ) -> token::Amount { - namada::sdk::rpc::get_total_staked_tokens(client, epoch) + namada_sdk::rpc::get_total_staked_tokens(client, epoch) .await .unwrap() } @@ -2398,7 +2396,7 @@ pub async fn get_delegators_delegation< client: &C, address: &Address, ) -> HashSet
{ - namada::sdk::rpc::get_delegators_delegation(client, address) + namada_sdk::rpc::get_delegators_delegation(client, address) .await .unwrap() } @@ -2410,7 +2408,7 @@ pub async fn get_delegators_delegation_at< address: &Address, epoch: Epoch, ) -> HashMap { - namada::sdk::rpc::get_delegators_delegation_at(client, address, epoch) + namada_sdk::rpc::get_delegators_delegation_at(client, address, epoch) .await .unwrap() } @@ -2420,7 +2418,7 @@ pub async fn query_governance_parameters< >( client: &C, ) -> GovernanceParameters { - namada::sdk::rpc::query_governance_parameters(client).await + namada_sdk::rpc::query_governance_parameters(client).await } /// A helper to unwrap client's response. Will shut down process on error. @@ -2503,7 +2501,7 @@ pub async fn compute_proposal_votes< proposal_id: u64, epoch: Epoch, ) -> ProposalVotes { - let votes = namada::sdk::rpc::query_proposal_votes(client, proposal_id) + let votes = namada_sdk::rpc::query_proposal_votes(client, proposal_id) .await .unwrap(); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 53b2232f64..1afefab825 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -6,17 +6,16 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; -use namada::ledger::{pos, Namada}; +use namada::ledger::pos; use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; -use namada::sdk::rpc::{TxBroadcastData, TxResponse}; -use namada::sdk::{error, signing, tx}; use namada::types::address::{Address, ImplicitAddress}; use namada::types::dec::Dec; use namada::types::io::Io; use namada::types::key::{self, *}; use namada::types::transaction::pos::InitValidator; -use namada::{display_line, edisplay_line}; +use namada_sdk::rpc::{TxBroadcastData, TxResponse}; +use namada_sdk::{display_line, edisplay_line, error, signing, tx, Namada}; use super::rpc; use 
crate::cli::{args, safe_exit}; diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index f508267db3..83fd499071 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -9,12 +9,12 @@ use borsh::BorshSerialize; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; -use namada::sdk::wallet::Wallet; use namada::types::address; use namada::types::chain::ChainId; use namada::types::dec::Dec; use namada::types::key::*; use namada::vm::validate_untrusted_wasm; +use namada_sdk::wallet::Wallet; use prost::bytes::Bytes; use rand::prelude::ThreadRng; use rand::thread_rng; diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..222e7b4f1f 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -6,7 +6,6 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; -use namada::ledger::eth_bridge::EthereumBridgeConfig; use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; use namada::types::address::Address; @@ -17,6 +16,7 @@ use namada::types::time::{DateTimeUtc, DurationSecs}; use namada::types::token::Denomination; use namada::types::uint::Uint; use namada::types::{storage, token}; +use namada_sdk::eth_bridge::EthereumBridgeConfig; /// Genesis configuration file format pub mod genesis_config { @@ -900,14 +900,14 @@ pub fn genesis( } #[cfg(any(test, feature = "dev"))] pub fn genesis(num_validators: u64) -> Genesis { - use namada::ledger::eth_bridge::{ - Contracts, Erc20WhitelistEntry, UpgradeableContract, - }; use namada::types::address::{ self, apfel, btc, dot, eth, kartoffel, nam, schnitzel, wnam, }; use namada::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada::types::ethereum_events::EthAddress; + use 
namada_sdk::eth_bridge::{ + Contracts, Erc20WhitelistEntry, UpgradeableContract, + }; use crate::wallet; diff --git a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index 6980778c07..300ea85347 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -12,9 +12,9 @@ use namada::core::types::ethereum_structs; use namada::eth_bridge::ethers; use namada::eth_bridge::ethers::providers::{Http, Middleware, Provider}; use namada::eth_bridge::oracle::config::Config; -use namada::ledger::eth_bridge::{eth_syncing_status_timeout, SyncStatus}; use namada::types::control_flow::time::{Constant, Duration, Instant, Sleep}; use namada::types::ethereum_events::EthereumEvent; +use namada_sdk::eth_bridge::{eth_syncing_status_timeout, SyncStatus}; use num256::Uint256; use thiserror::Error; use tokio::sync::mpsc::error::TryRecvError; diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..9a32f6dd60 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1049,7 +1049,6 @@ mod test_finalize_block { self, get_key_from_hash, get_nonce_key, get_signed_root_key, }; use namada::eth_bridge::storage::min_confirmations_key; - use namada::ledger::eth_bridge::MinimumConfirmations; use namada::ledger::gas::VpGasMeter; use namada::ledger::native_vp::parameters::ParametersVp; use namada::ledger::native_vp::NativeVp; @@ -1087,6 +1086,7 @@ mod test_finalize_block { use namada::types::transaction::{Fee, WrapperTx}; use namada::types::uint::Uint; use namada::types::vote_extensions::ethereum_events; + use namada_sdk::eth_bridge::MinimumConfirmations; use namada_test_utils::TestWasms; use test_log::test; diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d6b2efe4dd..6faf48f2d1 100644 --- 
a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::hash::Hash; -use namada::ledger::eth_bridge::EthBridgeStatus; use namada::ledger::parameters::{self, Parameters}; use namada::ledger::pos::{staking_token_address, PosParams}; use namada::ledger::storage::traits::StorageHasher; @@ -17,6 +16,7 @@ use namada::types::hash::Hash as CodeHash; use namada::types::key::*; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::vm::validate_untrusted_wasm; +use namada_sdk::eth_bridge::EthBridgeStatus; use super::*; use crate::facade::tendermint_proto::google::protobuf; diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..eecf6aea06 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -30,7 +30,6 @@ use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::transaction::Transaction; use namada::core::hints; use namada::core::ledger::eth_bridge; -use namada::ledger::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use namada::ledger::events::log::EventLog; use namada::ledger::events::Event; use namada::ledger::gas::{Gas, TxGasMeter}; @@ -66,6 +65,7 @@ use namada::types::transaction::{ use namada::types::{address, hash, token}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; +use namada_sdk::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use num_derive::{FromPrimitive, ToPrimitive}; use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..e5b1e94e13 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -4,7 +4,6 @@ use data_encoding::HEXUPPER; use namada::core::hints; use 
namada::core::ledger::storage::WlStorage; -use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::TempWlStorage; @@ -15,6 +14,7 @@ use namada::types::transaction::protocol::{ }; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; +use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::block_alloc::{BlockSpace, EncryptedTxsBins}; use super::*; diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index a62c3ec4b4..f1e56c295f 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -2,6 +2,7 @@ use borsh::BorshSerialize; use ferveo_common::TendermintValidator; +use namada::ledger::dry_run_tx; use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; use namada::ledger::storage_api::token; @@ -49,7 +50,11 @@ where }; // Invoke the root RPC handler - returns borsh-encoded data on success - let result = namada::ledger::queries::handle_path(ctx, &request); + let result = if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + namada::ledger::queries::handle_path(ctx, &request) + }; match result { Ok(ResponseQuery { data, info, proof }) => response::Query { value: data, @@ -137,10 +142,10 @@ where #[cfg(not(feature = "abcipp"))] mod test_queries { use namada::core::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; - use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::types::WeightedValidator; use namada::types::storage::Epoch; + use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::*; use crate::facade::tendermint_proto::abci::VoteInfo; diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs 
b/apps/src/lib/node/ledger/shell/testing/node.rs index 034ac80845..7ebf3ce0b8 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -6,6 +6,7 @@ use std::sync::{Arc, Mutex}; use color_eyre::eyre::{Report, Result}; use data_encoding::HEXUPPER; use lazy_static::lazy_static; +use namada::ledger::dry_run_tx; use namada::ledger::events::log::dumb_queries; use namada::ledger::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, @@ -19,7 +20,6 @@ use namada::proof_of_stake::{ read_consensus_validator_set_addresses_with_stake, validator_consensus_key_handle, }; -use namada::sdk::queries::Client; use namada::tendermint_proto::abci::VoteInfo; use namada::tendermint_rpc::endpoint::abci_info; use namada::tendermint_rpc::SimpleRequest; @@ -27,6 +27,7 @@ use namada::types::hash::Hash; use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::{BlockHash, BlockHeight, Epoch, Header}; use namada::types::time::DateTimeUtc; +use namada_sdk::queries::Client; use num_traits::cast::FromPrimitive; use regex::Regex; use tokio::sync::mpsc::UnboundedReceiver; @@ -352,7 +353,12 @@ impl<'a> Client for &'a MockNode { tx_wasm_cache: borrowed.tx_wasm_cache.read_only(), storage_read_past_height_limit: None, }; - rpc.handle(ctx, &request).map_err(Report::new) + if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + rpc.handle(ctx, &request) + } + .map_err(Report::new) } async fn perform( diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs index 658c35a121..e7e19bf96d 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -4,7 +4,6 @@ pub mod bridge_pool_vext; pub mod eth_events; pub mod val_set_update; -use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; #[cfg(feature = "abcipp")] use namada::ledger::pos::PosQueries; use 
namada::proto::{SignableEthMessage, Signed}; @@ -15,6 +14,7 @@ use namada::types::vote_extensions::VoteExtensionDigest; use namada::types::vote_extensions::{ bridge_pool_roots, ethereum_events, validator_set_update, VoteExtension, }; +use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::*; #[cfg(feature = "abcipp")] diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 002bd18904..201c96983f 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -273,8 +273,6 @@ mod test_bp_vote_extensions { use borsh::BorshSerialize; #[cfg(not(feature = "abcipp"))] use namada::core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; - #[cfg(not(feature = "abcipp"))] - use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::ledger::storage_api::StorageWrite; use namada::proof_of_stake::types::{ @@ -297,6 +295,8 @@ mod test_bp_vote_extensions { use namada::types::vote_extensions::bridge_pool_roots; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::VoteExtension; + #[cfg(not(feature = "abcipp"))] + use namada_sdk::eth_bridge::EthBridgeQueries; #[cfg(feature = "abcipp")] use tendermint_proto_abcipp::abci::response_verify_vote_extension::VerifyStatus; #[cfg(feature = "abcipp")] diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 0dd85bfd70..e7dbdc255b 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -2,7 +2,6 @@ use std::collections::{BTreeMap, HashMap}; -use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::ledger::storage::traits::StorageHasher; use 
namada::ledger::storage::{DBIter, DB}; @@ -15,6 +14,7 @@ use namada::types::vote_extensions::ethereum_events::{ }; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; +use namada_sdk::eth_bridge::EthBridgeQueries; use super::*; use crate::node::ledger::shell::{Shell, ShellMode}; diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 03843b4717..4888c10be1 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -311,7 +311,6 @@ mod test_vote_extensions { use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; - use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::{ @@ -337,6 +336,7 @@ mod test_vote_extensions { use namada::types::vote_extensions::validator_set_update; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::VoteExtension; + use namada_sdk::eth_bridge::EthBridgeQueries; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index ada1b16684..46349167ef 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -4,10 +4,10 @@ use std::io::{self, Write}; use borsh::BorshSerialize; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; use namada::types::key::{PublicKeyHash, RefTo}; use namada::types::masp::{MaspValue, PaymentAddress}; +use namada_sdk::masp::find_valid_diversifier; +use namada_sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; use 
rand_core::OsRng; use crate::cli; diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 00b0f49d26..82a9524daa 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -8,10 +8,10 @@ pub use dev::{ validator_keys, }; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; -use namada::ledger::{eth_bridge, governance, pgf, pos}; -use namada::sdk::wallet::alias::Alias; +use namada::ledger::{governance, pgf, pos}; use namada::types::address::Address; use namada::types::key::*; +use namada_sdk::wallet::alias::Alias; use crate::config::genesis::genesis_config::GenesisConfig; @@ -22,7 +22,7 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { ("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), ("governance".into(), governance::ADDRESS), - ("eth_bridge".into(), eth_bridge::ADDRESS), + ("eth_bridge".into(), namada_sdk::eth_bridge::ADDRESS), ("bridge_pool".into(), BRIDGE_POOL_ADDRESS), ("pgf".into(), pgf::ADDRESS), ]; @@ -78,12 +78,12 @@ mod dev { use borsh::BorshDeserialize; use namada::ledger::{governance, pgf, pos}; - use namada::sdk::wallet::alias::Alias; use namada::types::address::{ apfel, btc, dot, eth, kartoffel, nam, schnitzel, Address, }; use namada::types::key::dkg_session_keys::DkgKeypair; use namada::types::key::*; + use namada_sdk::wallet::alias::Alias; /// Generate a new protocol signing keypair, eth hot key and DKG session /// keypair diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 61c49fe580..18818daef5 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -9,16 +9,16 @@ use std::str::FromStr; use std::{env, fs}; use namada::bip39::{Language, Mnemonic}; -pub use namada::sdk::wallet::alias::Alias; -use namada::sdk::wallet::fs::FsWalletStorage; -use namada::sdk::wallet::store::Store; -use namada::sdk::wallet::{ +use namada::types::address::Address; +use 
namada::types::key::*; +pub use namada_sdk::wallet::alias::Alias; +use namada_sdk::wallet::fs::FsWalletStorage; +use namada_sdk::wallet::store::Store; +use namada_sdk::wallet::{ AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, Wallet, WalletIo, }; -pub use namada::sdk::wallet::{ValidatorData, ValidatorKeys}; -use namada::types::address::Address; -use namada::types::key::*; +pub use namada_sdk::wallet::{ValidatorData, ValidatorKeys}; use rand_core::OsRng; pub use store::wallet_file; use zeroize::Zeroizing; @@ -317,7 +317,7 @@ pub fn read_and_confirm_encryption_password( #[cfg(test)] mod tests { use namada::bip39::MnemonicType; - use namada::sdk::wallet::WalletIo; + use namada_sdk::wallet::WalletIo; use rand_core; use super::CliWalletUtils; diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 13a2c21f2b..da12c2dcce 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -3,11 +3,11 @@ use std::path::{Path, PathBuf}; use ark_serialize::{Read, Write}; use fd_lock::RwLock; -use namada::sdk::wallet::pre_genesis::{ +use namada::types::key::SchemeType; +use namada_sdk::wallet::pre_genesis::{ ReadError, ValidatorStore, ValidatorWallet, }; -use namada::sdk::wallet::{gen_key_to_store, WalletIo}; -use namada::types::key::SchemeType; +use namada_sdk::wallet::{gen_key_to_store, WalletIo}; use zeroize::Zeroizing; use crate::wallet::store::gen_validator_keys; diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index c035925160..62eae8ac0e 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -5,14 +5,14 @@ use std::str::FromStr; use ark_std::rand::prelude::*; use ark_std::rand::SeedableRng; #[cfg(not(feature = "dev"))] -use namada::sdk::wallet::store::AddressVpType; -#[cfg(feature = "dev")] -use namada::sdk::wallet::StoredKeypair; -use namada::sdk::wallet::{gen_sk_rng, LoadStoreError, Store, ValidatorKeys}; -#[cfg(not(feature = "dev"))] use 
namada::types::address::Address; use namada::types::key::*; use namada::types::transaction::EllipticCurve; +#[cfg(not(feature = "dev"))] +use namada_sdk::wallet::store::AddressVpType; +#[cfg(feature = "dev")] +use namada_sdk::wallet::StoredKeypair; +use namada_sdk::wallet::{gen_sk_rng, LoadStoreError, Store, ValidatorKeys}; use crate::config::genesis::genesis_config::GenesisConfig; use crate::wallet::CliWalletUtils; diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 91a5d45333..ebd99eedda 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -49,6 +49,7 @@ masp_primitives.workspace = true masp_proofs.workspace = true namada = { path = "../shared", features = ["testing"] } namada_apps = { path = "../apps", features = ["testing"] } +namada_sdk = {path = "../sdk", features = ["testing"] } namada_test_utils = { path = "../test_utils" } prost.workspace = true rand.workspace = true diff --git a/benches/lib.rs b/benches/lib.rs index f0cba69475..038d7017f0 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -63,20 +63,15 @@ use namada::ibc::core::Msg; use namada::ibc::Height as IbcHeight; use namada::ibc_proto::google::protobuf::Any; use namada::ibc_proto::protobuf::Protobuf; +use namada::ledger::dry_run_tx; use namada::ledger::gas::TxGasMeter; use namada::ledger::ibc::storage::{channel_key, connection_key}; use namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada::ledger::storage_api::StorageRead; -use namada::ledger::NamadaImpl; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; -use namada::sdk::args::InputAmount; -use namada::sdk::masp::{ - self, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; -use namada::sdk::wallet::Wallet; use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; @@ -101,6 +96,12 @@ use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; use 
namada_apps::node::ledger::shell::Shell; use namada_apps::wallet::{defaults, CliWalletUtils}; use namada_apps::{config, wasm_loader}; +use namada_sdk::args::InputAmount; +use namada_sdk::masp::{ + self, ShieldedContext, ShieldedTransfer, ShieldedUtils, +}; +use namada_sdk::wallet::Wallet; +use namada_sdk::NamadaImpl; use namada_test_utils::tx_data::TxWriteData; use rand_core::OsRng; use sha2::{Digest, Sha256}; @@ -670,8 +671,12 @@ impl Client for BenchShell { storage_read_past_height_limit: None, }; - RPC.handle(ctx, &request) - .map_err(|_| std::io::Error::from(std::io::ErrorKind::NotFound)) + if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + RPC.handle(ctx, &request) + } + .map_err(|_| std::io::Error::from(std::io::ErrorKind::NotFound)) } async fn perform( @@ -727,7 +732,7 @@ impl Default for BenchShieldedCtx { .fvk .vk; let (div, _g_d) = - namada::sdk::masp::find_valid_diversifier(&mut OsRng); + namada_sdk::masp::find_valid_diversifier(&mut OsRng); let payment_addr = viewing_key.to_payment_address(div).unwrap(); let _ = ctx .wallet diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml new file mode 100644 index 0000000000..f0eb9289ed --- /dev/null +++ b/sdk/Cargo.toml @@ -0,0 +1,121 @@ +[package] +name = "namada_sdk" +description = "The main Namada SDK crate" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +abciplus = [ + "namada_core/abciplus", + "namada_proof_of_stake/abciplus", + "namada_ethereum_bridge/abciplus", +] + +ferveo-tpke = [ + "namada_core/ferveo-tpke", +] + +masp-tx-gen = [ + "rand", + "rand_core", +] + +multicore = ["masp_proofs/multicore"] + +namada-sdk = [ + "tendermint-rpc", + "masp-tx-gen", + 
"ferveo-tpke", + "masp_primitives/transparent-inputs" +] + +std = ["fd-lock"] + +# tendermint-rpc support +tendermint-rpc = [ + "async-client", + "dep:tendermint-rpc", +] + +wasm-runtime = [ + "namada_core/wasm-runtime", +] + +# Enable queries support for an async client +async-client = [ + "async-trait", +] + +ibc-mocks = [ + "namada_core/ibc-mocks", +] + +# for integration tests and test utilies +testing = [ + "namada_core/testing", + "namada_ethereum_bridge/testing", + "namada_proof_of_stake/testing", + "async-client", + "rand_core", + "rand", +] + +[dependencies] +async-trait = {version = "0.1.51", optional = true} +bimap.workspace = true +borsh.workspace = true +circular-queue.workspace = true +data-encoding.workspace = true +derivation-path.workspace = true +ethbridge-bridge-contract.workspace = true +ethers.workspace = true +fd-lock = { workspace = true, optional = true } +futures.workspace = true +itertools.workspace = true +masp_primitives.workspace = true +masp_proofs = { workspace = true, features = ["download-params"] } +namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign"]} +namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} +namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} +num256.workspace = true +orion.workspace = true +owo-colors = "3.5.0" +parse_duration = "2.1.1" +paste.workspace = true +prost.workspace = true +rand = {optional = true, workspace = true} +rand_core = {optional = true, workspace = true} +ripemd.workspace = true +serde.workspace = true +serde_json.workspace = true +sha2.workspace = true +slip10_ed25519.workspace = true +tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "b7d1e5afc6f2ccb3fd1545c2174bab1cc48d7fa7", default-features = false, features = ["trait-client"], optional = true} +thiserror.workspace = true +tiny-bip39.workspace = true +tiny-hderive.workspace = true +toml.workspace = true +tracing.workspace 
= true +zeroize.workspace = true + +[target.'cfg(not(target_family = "wasm"))'.dependencies] +tokio = {workspace = true, features = ["full"]} + +[target.'cfg(target_family = "wasm")'.dependencies] +tokio = {workspace = true, default-features = false, features = ["sync"]} +wasmtimer = "0.2.0" + +[dev-dependencies] +assert_matches.workspace = true +namada_test_utils = {path = "../test_utils"} +tempfile.workspace = true diff --git a/shared/src/sdk/args.rs b/sdk/src/args.rs similarity index 95% rename from shared/src/sdk/args.rs rename to sdk/src/args.rs index d7556d6321..de4fd5bb98 100644 --- a/shared/src/sdk/args.rs +++ b/sdk/src/args.rs @@ -7,25 +7,24 @@ use std::time::Duration as StdDuration; use namada_core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, }; +use namada_core::types::address::Address; use namada_core::types::chain::ChainId; use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::keccak::KeccakHash; +use namada_core::types::key::{common, SchemeType}; +use namada_core::types::masp::MaspValue; +use namada_core::types::storage::Epoch; use namada_core::types::time::DateTimeUtc; +use namada_core::types::transaction::GasLimit; +use namada_core::types::{storage, token}; use serde::{Deserialize, Serialize}; use zeroize::Zeroizing; +use crate::eth_bridge::bridge_pool; use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::ledger::eth_bridge::bridge_pool; -use crate::ledger::Namada; -use crate::sdk::signing::SigningTxData; -use crate::sdk::{rpc, tx}; -use crate::types::address::Address; -use crate::types::keccak::KeccakHash; -use crate::types::key::{common, SchemeType}; -use crate::types::masp::MaspValue; -use crate::types::storage::Epoch; -use crate::types::transaction::GasLimit; -use crate::types::{storage, token}; +use crate::signing::SigningTxData; +use crate::{rpc, tx, Namada}; /// [`Duration`](StdDuration) wrapper that provides a 
/// method to parse a value from a string. @@ -179,11 +178,8 @@ impl TxCustom { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_custom(context, self).await } } @@ -288,11 +284,8 @@ impl TxTransfer { pub async fn build<'a>( &mut self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_transfer(context, self).await } } @@ -405,11 +398,8 @@ impl TxIbcTransfer { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_ibc_transfer(context, self).await } } @@ -497,11 +487,8 @@ impl InitProposal { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { let current_epoch = rpc::query_epoch(context.client()).await?; let governance_parameters = rpc::query_governance_parameters(context.client()).await; @@ -511,13 +498,13 @@ impl InitProposal { self.proposal_data.as_ref(), ) .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + crate::error::TxError::FailedGovernaneProposalDeserialize( e.to_string(), ) })? 
.validate(&governance_parameters, current_epoch, self.tx.force) .map_err(|e| { - crate::sdk::error::TxError::InvalidProposal(e.to_string()) + crate::error::TxError::InvalidProposal(e.to_string()) })?; tx::build_pgf_funding_proposal(context, self, proposal).await @@ -526,7 +513,7 @@ impl InitProposal { self.proposal_data.as_ref(), ) .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + crate::error::TxError::FailedGovernaneProposalDeserialize( e.to_string(), ) })?; @@ -545,7 +532,7 @@ impl InitProposal { self.tx.force, ) .map_err(|e| { - crate::sdk::error::TxError::InvalidProposal(e.to_string()) + crate::error::TxError::InvalidProposal(e.to_string()) })?; tx::build_pgf_stewards_proposal(context, self, proposal).await @@ -554,7 +541,7 @@ impl InitProposal { self.proposal_data.as_ref(), ) .map_err(|e| { - crate::sdk::error::TxError::FailedGovernaneProposalDeserialize( + crate::error::TxError::FailedGovernaneProposalDeserialize( e.to_string(), ) })?; @@ -573,7 +560,7 @@ impl InitProposal { self.tx.force, ) .map_err(|e| { - crate::sdk::error::TxError::InvalidProposal(e.to_string()) + crate::error::TxError::InvalidProposal(e.to_string()) })?; tx::build_default_proposal(context, self, proposal).await } @@ -657,11 +644,8 @@ impl VoteProposal { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { let current_epoch = rpc::query_epoch(context.client()).await?; tx::build_vote_proposal(context, self, current_epoch).await } @@ -786,11 +770,8 @@ impl TxUpdateAccount { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_update_account(context, self).await } } @@ -867,11 +848,8 @@ impl Bond { pub async fn 
build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_bond(context, self).await } } @@ -897,7 +875,7 @@ impl Unbond { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( + ) -> crate::error::Result<( crate::proto::Tx, SigningTxData, Option, @@ -981,11 +959,8 @@ impl RevealPk { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_reveal_pk(context, &self.tx, &self.public_key).await } } @@ -1068,11 +1043,8 @@ impl Withdraw { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_withdraw(context, self).await } } @@ -1204,11 +1176,8 @@ impl CommissionRateChange { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_validator_commission_change(context, self).await } } @@ -1263,11 +1232,8 @@ impl UpdateStewardCommission { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_update_steward_commission(context, self).await } } @@ -1315,11 +1281,8 @@ impl ResignSteward { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, 
SigningTxData, Option)> + { tx::build_resign_steward(context, self).await } } @@ -1367,11 +1330,8 @@ impl TxUnjailValidator { pub async fn build<'a>( &self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { tx::build_unjail_validator(context, self).await } } @@ -1892,11 +1852,8 @@ impl EthereumBridgePool { pub async fn build<'a>( self, context: &impl Namada<'a>, - ) -> crate::sdk::error::Result<( - crate::proto::Tx, - SigningTxData, - Option, - )> { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { bridge_pool::build_bridge_pool_tx(context, self).await } } diff --git a/shared/src/types/control_flow.rs b/sdk/src/control_flow/mod.rs similarity index 100% rename from shared/src/types/control_flow.rs rename to sdk/src/control_flow/mod.rs diff --git a/shared/src/types/control_flow/time.rs b/sdk/src/control_flow/time.rs similarity index 100% rename from shared/src/types/control_flow/time.rs rename to sdk/src/control_flow/time.rs diff --git a/shared/src/sdk/error.rs b/sdk/src/error.rs similarity index 97% rename from shared/src/sdk/error.rs rename to sdk/src/error.rs index b103a9523f..a3091a3d7c 100644 --- a/shared/src/sdk/error.rs +++ b/sdk/src/error.rs @@ -9,8 +9,7 @@ use prost::EncodeError; use tendermint_rpc::Error as RpcError; use thiserror::Error; -use crate::sdk::error::Error::Pinned; -use crate::vm::WasmValidationError; +use crate::error::Error::Pinned; /// The standard Result type that most code ought to return pub type Result = std::result::Result; @@ -222,9 +221,6 @@ pub enum TxError { /// Error in the fee unshielding transaction #[error("Error in fee unshielding: {0}")] FeeUnshieldingError(String), - /// Wasm validation failed - #[error("Validity predicate code validation failed with {0}")] - WasmValidationFailure(WasmValidationError), /// Encoding transaction failure #[error("Encoding tx data, 
{0}, shouldn't fail")] EncodeTxFailure(String), diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs similarity index 97% rename from shared/src/ledger/eth_bridge/bridge_pool.rs rename to sdk/src/eth_bridge/bridge_pool.rs index b4c633ca78..da80bdf41f 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -9,36 +9,33 @@ use borsh::BorshSerialize; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; +use namada_core::types::address::Address; +use namada_core::types::eth_abi::Encode; +use namada_core::types::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, +}; +use namada_core::types::keccak::KeccakHash; use namada_core::types::storage::Epoch; +use namada_core::types::token::{Amount, DenominatedAmount}; +use namada_core::types::voting_power::FractionalVotingPower; use owo_colors::OwoColorize; use serde::{Deserialize, Serialize}; use super::{block_on_eth_sync, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::time::{Duration, Instant}; +use crate::control_flow::{self, install_shutdown_signal, Halt, TryHalt}; +use crate::error::Error; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::ledger::queries::{ +use crate::io::Io; +use crate::proto::Tx; +use crate::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, RPC, }; -use crate::ledger::signing::aux_signing_data; -use crate::ledger::tx::prepare_tx; -use crate::ledger::{args, Namada, SigningTxData}; -use crate::proto::Tx; -use crate::sdk::error::Error; -use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; -use crate::types::address::Address; -use crate::types::control_flow::time::{Duration, Instant}; -use crate::types::control_flow::{ - self, install_shutdown_signal, Halt, TryHalt, -}; -use crate::types::eth_abi::Encode; -use crate::types::eth_bridge_pool::{ 
- GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, -}; -use crate::types::io::Io; -use crate::types::keccak::KeccakHash; -use crate::types::token::{Amount, DenominatedAmount}; -use crate::types::voting_power::FractionalVotingPower; -use crate::{display, display_line}; +use crate::rpc::{query_wasm_code_hash, validate_amount}; +use crate::signing::aux_signing_data; +use crate::tx::prepare_tx; +use crate::{args, display, display_line, Namada, SigningTxData}; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. pub async fn build_bridge_pool_tx<'a>( @@ -456,7 +453,12 @@ mod recommendations { use std::collections::BTreeSet; use borsh::BorshDeserialize; + use namada_core::types::ethereum_events::Uint as EthUint; + use namada_core::types::storage::BlockHeight; use namada_core::types::uint::{self, Uint, I256}; + use namada_core::types::vote_extensions::validator_set_update::{ + EthAddrBook, VotingPowersMap, VotingPowersMapExt, + }; use super::*; use crate::edisplay_line; @@ -464,12 +466,7 @@ mod recommendations { get_nonce_key, get_signed_root_key, }; use crate::eth_bridge::storage::proof::BridgePoolRootProof; - use crate::types::ethereum_events::Uint as EthUint; - use crate::types::io::Io; - use crate::types::storage::BlockHeight; - use crate::types::vote_extensions::validator_set_update::{ - EthAddrBook, VotingPowersMap, VotingPowersMapExt, - }; + use crate::io::Io; const fn unsigned_transfer_fee() -> Uint { Uint::from_u64(37_500_u64) @@ -920,8 +917,8 @@ mod recommendations { use namada_core::types::ethereum_events::EthAddress; use super::*; - use crate::types::control_flow::ProceedOrElse; - use crate::types::io::StdIo; + use crate::control_flow::ProceedOrElse; + use crate::io::StdIo; /// An established user address for testing & development pub fn bertha_address() -> Address { diff --git a/shared/src/ledger/eth_bridge.rs b/sdk/src/eth_bridge/mod.rs similarity index 95% rename from shared/src/ledger/eth_bridge.rs rename to 
sdk/src/eth_bridge/mod.rs index 66ec22a63b..49b77705a3 100644 --- a/shared/src/ledger/eth_bridge.rs +++ b/sdk/src/eth_bridge/mod.rs @@ -5,19 +5,22 @@ pub mod validator_set; use std::ops::ControlFlow; +pub use ethers; use ethers::providers::Middleware; use itertools::Either; pub use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; pub use namada_core::ledger::eth_bridge::{ADDRESS, INTERNAL_ADDRESS}; +pub use namada_core::types::ethereum_structs as structs; pub use namada_ethereum_bridge::parameters::*; pub use namada_ethereum_bridge::storage::eth_bridge_queries::*; +pub use namada_ethereum_bridge::*; use num256::Uint256; -use crate::types::control_flow::time::{ +use crate::control_flow::time::{ Constant, Duration, Error as TimeoutError, Instant, LinearBackoff, Sleep, }; -use crate::types::control_flow::{self, Halt, TryHalt}; -use crate::types::io::Io; +use crate::control_flow::{self, Halt, TryHalt}; +use crate::io::Io; use crate::{display_line, edisplay_line}; const DEFAULT_BACKOFF: Duration = std::time::Duration::from_millis(500); diff --git a/shared/src/ledger/eth_bridge/validator_set.rs b/sdk/src/eth_bridge/validator_set.rs similarity index 98% rename from shared/src/ledger/eth_bridge/validator_set.rs rename to sdk/src/eth_bridge/validator_set.rs index 90f043fe18..5c98b39ae0 100644 --- a/shared/src/ledger/eth_bridge/validator_set.rs +++ b/sdk/src/eth_bridge/validator_set.rs @@ -12,23 +12,19 @@ use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::{self, FutureExt}; use namada_core::hints; +use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage::Epoch; +use namada_core::types::vote_extensions::validator_set_update::ValidatorSetArgs; use super::{block_on_eth_sync, eth_sync_or, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::time::{self, Duration, Instant}; +use crate::control_flow::{self, install_shutdown_signal, Halt, TryHalt}; use 
crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::core::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; -use crate::ledger::queries::RPC; -use crate::sdk::args; -use crate::sdk::queries::Client; -use crate::types::control_flow::time::{self, Duration, Instant}; -use crate::types::control_flow::{ - self, install_shutdown_signal, Halt, TryHalt, -}; -use crate::types::ethereum_events::EthAddress; -use crate::types::io::Io; -use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs; -use crate::{display_line, edisplay_line}; +use crate::io::Io; +use crate::queries::{Client, RPC}; +use crate::{args, display_line, edisplay_line}; /// Relayer related errors. #[derive(Debug, Default)] diff --git a/shared/src/ledger/events/log.rs b/sdk/src/events/log.rs similarity index 97% rename from shared/src/ledger/events/log.rs rename to sdk/src/events/log.rs index a2dc3978d0..596c23bdc9 100644 --- a/shared/src/ledger/events/log.rs +++ b/sdk/src/events/log.rs @@ -8,7 +8,7 @@ use std::default::Default; use circular_queue::CircularQueue; -use crate::ledger::events::Event; +use crate::events::Event; pub mod dumb_queries; @@ -85,9 +85,10 @@ impl EventLog { #[cfg(test)] mod tests { + use namada_core::types::hash::Hash; + use super::*; - use crate::ledger::events::{EventLevel, EventType}; - use crate::types::hash::Hash; + use crate::events::{EventLevel, EventType}; const HASH: &str = "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; diff --git a/shared/src/ledger/events/log/dumb_queries.rs b/sdk/src/events/log/dumb_queries.rs similarity index 96% rename from shared/src/ledger/events/log/dumb_queries.rs rename to sdk/src/events/log/dumb_queries.rs index 5ff7c8d54f..44988fb0dc 100644 --- a/shared/src/ledger/events/log/dumb_queries.rs +++ b/sdk/src/events/log/dumb_queries.rs @@ -8,12 +8,13 @@ use std::collections::HashMap; +use namada_core::types::hash::Hash; +use 
namada_core::types::storage::BlockHeight; + +use crate::events::{Event, EventType}; use crate::ibc::core::ics04_channel::packet::Sequence; use crate::ibc::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::ibc::Height as IbcHeight; -use crate::ledger::events::{Event, EventType}; -use crate::types::hash::Hash; -use crate::types::storage::BlockHeight; /// A [`QueryMatcher`] verifies if a Namada event matches a /// given Tendermint query. @@ -118,7 +119,7 @@ impl QueryMatcher { #[cfg(test)] mod tests { use super::*; - use crate::ledger::events::EventLevel; + use crate::events::EventLevel; /// Test if query matching is working as expected. #[test] diff --git a/shared/src/ledger/events.rs b/sdk/src/events/mod.rs similarity index 94% rename from shared/src/ledger/events.rs rename to sdk/src/events/mod.rs index ff5b9f108d..141867c63d 100644 --- a/shared/src/ledger/events.rs +++ b/sdk/src/events/mod.rs @@ -8,14 +8,14 @@ use std::ops::{Index, IndexMut}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::types::ibc::IbcEvent; +#[cfg(feature = "ferveo-tpke")] +use namada_core::types::transaction::TxType; use serde_json::Value; -use crate::ledger::governance::utils::ProposalEvent; -use crate::sdk::error::{EncodingError, Error, EventError}; +// use crate::ledger::governance::utils::ProposalEvent; +use crate::error::{EncodingError, Error, EventError}; use crate::tendermint_proto::abci::EventAttribute; -use crate::types::ibc::IbcEvent; -#[cfg(feature = "ferveo-tpke")] -use crate::types::transaction::TxType; /// Indicates if an event is emitted do to /// an individual Tx or the nature of a finalized block @@ -171,16 +171,6 @@ impl From for Event { } } -impl From for Event { - fn from(proposal_event: ProposalEvent) -> Self { - Self { - event_type: EventType::Proposal, - level: EventLevel::Block, - attributes: proposal_event.attributes, - } - } -} - /// Convert our custom event into the necessary tendermint proto type impl 
From for crate::tendermint_proto::abci::Event { fn from(event: Event) -> Self { diff --git a/shared/src/types/io.rs b/sdk/src/io.rs similarity index 100% rename from shared/src/types/io.rs rename to sdk/src/io.rs diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs new file mode 100644 index 0000000000..78a5caf812 --- /dev/null +++ b/sdk/src/lib.rs @@ -0,0 +1,560 @@ +pub use namada_core::proto; +#[cfg(feature = "tendermint-rpc")] +pub use tendermint_rpc; +#[cfg(feature = "tendermint-rpc-abcipp")] +pub use tendermint_rpc_abcipp as tendermint_rpc; +pub use {bip39, namada_core as core, namada_proof_of_stake as proof_of_stake}; +#[cfg(feature = "abcipp")] +pub use { + ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, + tendermint_abcipp as tendermint, + tendermint_proto_abcipp as tendermint_proto, +}; +#[cfg(feature = "abciplus")] +pub use { + namada_core::ibc, namada_core::ibc_proto, namada_core::tendermint, + namada_core::tendermint_proto, +}; + +pub mod eth_bridge; + +pub mod rpc; + +pub mod args; +pub mod masp; +pub mod signing; +#[allow(clippy::result_large_err)] +pub mod tx; + +pub mod control_flow; +pub mod error; +pub mod events; +pub mod io; +pub mod queries; +pub mod wallet; + +use std::path::PathBuf; +use std::str::FromStr; + +use args::{InputAmount, SdkTypes}; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::key::*; +use namada_core::types::masp::{TransferSource, TransferTarget}; +use namada_core::types::token; +use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_core::types::transaction::GasLimit; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::io::Io; +use crate::masp::{ShieldedContext, ShieldedUtils}; +use crate::proto::Tx; +use crate::rpc::query_native_token; +use crate::signing::SigningTxData; +use crate::tx::{ + ProcessTxResponse, 
TX_BOND_WASM, TX_BRIDGE_POOL_WASM, + TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_PROPOSAL, + TX_INIT_VALIDATOR_WASM, TX_RESIGN_STEWARD, TX_REVEAL_PK, TX_TRANSFER_WASM, + TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, + VP_USER_WASM, +}; +use crate::wallet::{Wallet, WalletIo, WalletStorage}; + +#[async_trait::async_trait(?Send)] +/// An interface for high-level interaction with the Namada SDK +pub trait Namada<'a>: Sized { + /// A client with async request dispatcher method + type Client: 'a + queries::Client + Sync; + /// Captures the interactive parts of the wallet's functioning + type WalletUtils: 'a + WalletIo + WalletStorage; + /// Abstracts platform specific details away from the logic of shielded pool + /// operations. + type ShieldedUtils: 'a + ShieldedUtils; + /// Captures the input/output streams used by this object + type Io: 'a + Io; + + /// Obtain the client for communicating with the ledger + fn client(&self) -> &'a Self::Client; + + /// Obtain the input/output handle for this context + fn io(&self) -> &'a Self::Io; + + /// Obtain read guard on the wallet + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet>; + + /// Obtain write guard on the wallet + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet>; + + /// Obtain read guard on the shielded context + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext>; + + /// Obtain write guard on the shielded context + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext>; + + /// Return the native token + fn native_token(&self) -> Address; + + /// Make a tx builder using no arguments + fn tx_builder(&self) -> args::Tx { + args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + 
fee_amount: None, + wrapper_fee_payer: None, + fee_token: self.native_token(), + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + } + } + + /// Make a TxTransfer builder from the given minimum set of arguments + fn new_transfer( + &self, + source: TransferSource, + target: TransferTarget, + token: Address, + amount: InputAmount, + ) -> args::TxTransfer { + args::TxTransfer { + source, + target, + token, + amount, + tx_code_path: PathBuf::from(TX_TRANSFER_WASM), + tx: self.tx_builder(), + native_token: self.native_token(), + } + } + + /// Make a RevealPK builder from the given minimum set of arguments + fn new_reveal_pk(&self, public_key: common::PublicKey) -> args::RevealPk { + args::RevealPk { + public_key, + tx: self.tx_builder(), + } + } + + /// Make a Bond builder from the given minimum set of arguments + fn new_bond( + &self, + validator: Address, + amount: token::Amount, + ) -> args::Bond { + args::Bond { + validator, + amount, + source: None, + tx: self.tx_builder(), + native_token: self.native_token(), + tx_code_path: PathBuf::from(TX_BOND_WASM), + } + } + + /// Make a Unbond builder from the given minimum set of arguments + fn new_unbond( + &self, + validator: Address, + amount: token::Amount, + ) -> args::Unbond { + args::Unbond { + validator, + amount, + source: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UNBOND_WASM), + } + } + + /// Make a TxIbcTransfer builder from the given minimum set of arguments + fn new_ibc_transfer( + &self, + source: Address, + receiver: String, + token: Address, + amount: InputAmount, + channel_id: ChannelId, + ) -> args::TxIbcTransfer { + args::TxIbcTransfer { + source, + receiver, + token, + amount, + channel_id, + port_id: PortId::from_str("transfer").unwrap(), + timeout_height: None, 
+ timeout_sec_offset: None, + memo: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_IBC_WASM), + } + } + + /// Make a InitProposal builder from the given minimum set of arguments + fn new_init_proposal(&self, proposal_data: Vec) -> args::InitProposal { + args::InitProposal { + proposal_data, + native_token: self.native_token(), + is_offline: false, + is_pgf_stewards: false, + is_pgf_funding: false, + tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a TxUpdateAccount builder from the given minimum set of arguments + fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { + args::TxUpdateAccount { + addr, + vp_code_path: None, + public_keys: vec![], + threshold: None, + tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), + tx: self.tx_builder(), + } + } + + /// Make a VoteProposal builder from the given minimum set of arguments + fn new_vote_prposal( + &self, + vote: String, + voter: Address, + ) -> args::VoteProposal { + args::VoteProposal { + vote, + voter, + proposal_id: None, + is_offline: false, + proposal_data: None, + tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a CommissionRateChange builder from the given minimum set of + /// arguments + fn new_change_commission_rate( + &self, + rate: Dec, + validator: Address, + ) -> args::CommissionRateChange { + args::CommissionRateChange { + rate, + validator, + tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxInitValidator builder from the given minimum set of arguments + fn new_init_validator( + &self, + commission_rate: Dec, + max_commission_rate_change: Dec, + ) -> args::TxInitValidator { + args::TxInitValidator { + commission_rate, + max_commission_rate_change, + scheme: SchemeType::Ed25519, + account_keys: vec![], + threshold: None, + consensus_key: None, + eth_cold_key: None, + eth_hot_key: None, + protocol_key: None, + 
validator_vp_code_path: PathBuf::from(VP_USER_WASM), + unsafe_dont_encrypt: false, + tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxUnjailValidator builder from the given minimum set of arguments + fn new_unjail_validator( + &self, + validator: Address, + ) -> args::TxUnjailValidator { + args::TxUnjailValidator { + validator, + tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a Withdraw builder from the given minimum set of arguments + fn new_withdraw(&self, validator: Address) -> args::Withdraw { + args::Withdraw { + validator, + source: None, + tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), + tx: self.tx_builder(), + } + } + + /// Make a Withdraw builder from the given minimum set of arguments + fn new_add_erc20_transfer( + &self, + sender: Address, + recipient: EthAddress, + asset: EthAddress, + amount: InputAmount, + ) -> args::EthereumBridgePool { + args::EthereumBridgePool { + sender, + recipient, + asset, + amount, + fee_amount: InputAmount::Unvalidated(token::DenominatedAmount { + amount: token::Amount::default(), + denom: NATIVE_MAX_DECIMAL_PLACES.into(), + }), + fee_payer: None, + fee_token: self.native_token(), + nut: false, + code_path: PathBuf::from(TX_BRIDGE_POOL_WASM), + tx: self.tx_builder(), + } + } + + /// Make a ResignSteward builder from the given minimum set of arguments + fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { + args::ResignSteward { + steward, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), + } + } + + /// Make a UpdateStewardCommission builder from the given minimum set of + /// arguments + fn new_update_steward_rewards( + &self, + steward: Address, + commission: Vec, + ) -> args::UpdateStewardCommission { + args::UpdateStewardCommission { + steward, + commission, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), + } + } + + /// Make a 
TxCustom builder from the given minimum set of arguments + fn new_custom(&self, owner: Address) -> args::TxCustom { + args::TxCustom { + owner, + tx: self.tx_builder(), + code_path: None, + data_path: None, + serialized_tx: None, + } + } + + /// Sign the given transaction using the given signing data + async fn sign( + &self, + tx: &mut Tx, + args: &args::Tx, + signing_data: SigningTxData, + ) -> crate::error::Result<()> { + signing::sign_tx(*self.wallet_mut().await, args, tx, signing_data) + } + + /// Process the given transaction using the given flags + async fn submit( + &self, + tx: Tx, + args: &args::Tx, + ) -> crate::error::Result { + tx::process_tx(self, args, tx).await + } +} + +/// Provides convenience methods for common Namada interactions +pub struct NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + /// Used to send and receive messages from the ledger + pub client: &'a C, + /// Stores the addresses and keys required for ledger interactions + pub wallet: RwLock<&'a mut Wallet>, + /// Stores the current state of the shielded pool + pub shielded: RwLock<&'a mut ShieldedContext>, + /// Captures the input/output streams used by this object + pub io: &'a I, + /// The address of the native token + native_token: Address, + /// The default builder for a Tx + prototype: args::Tx, +} + +impl<'a, C, U, V, I> NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + /// Construct a new Namada context with the given native token address + pub fn native_new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + io: &'a I, + native_token: Address, + ) -> Self { + NamadaImpl { + client, + wallet: RwLock::new(wallet), + shielded: RwLock::new(shielded), + io, + native_token: native_token.clone(), + prototype: args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + 
broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + fee_amount: None, + wrapper_fee_payer: None, + fee_token: native_token, + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + }, + } + } + + /// Construct a new Namada context looking up the native token address + pub async fn new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + io: &'a I, + ) -> crate::error::Result> { + let native_token = query_native_token(client).await?; + Ok(NamadaImpl::native_new( + client, + wallet, + shielded, + io, + native_token, + )) + } +} + +#[async_trait::async_trait(?Send)] +impl<'a, C, U, V, I> Namada<'a> for NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo + WalletStorage, + V: ShieldedUtils, + I: Io, +{ + type Client = C; + type Io = I; + type ShieldedUtils = V; + type WalletUtils = U; + + /// Obtain the prototypical Tx builder + fn tx_builder(&self) -> args::Tx { + self.prototype.clone() + } + + fn native_token(&self) -> Address { + self.native_token.clone() + } + + fn io(&self) -> &'a Self::Io { + self.io + } + + fn client(&self) -> &'a Self::Client { + self.client + } + + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet> { + self.wallet.read().await + } + + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet> { + self.wallet.write().await + } + + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext> { + self.shielded.read().await + } + + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext> { + self.shielded.write().await + } +} + +/// Allow the prototypical Tx builder to be modified +impl<'a, C, U, V, I> args::TxBuilder for NamadaImpl<'a, C, U, V, I> +where + C: 
queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + fn tx(self, func: F) -> Self + where + F: FnOnce(args::Tx) -> args::Tx, + { + Self { + prototype: func(self.prototype), + ..self + } + } +} diff --git a/shared/src/sdk/masp.rs b/sdk/src/masp.rs similarity index 99% rename from shared/src/sdk/masp.rs rename to sdk/src/masp.rs index 96ab27fbd5..b010bde7d7 100644 --- a/shared/src/sdk/masp.rs +++ b/sdk/src/masp.rs @@ -49,8 +49,18 @@ use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::SaplingVerificationContext; -use namada_core::types::token::{Change, MaspDenom}; -use namada_core::types::transaction::AffineCurve; +use namada_core::types::address::{masp, Address}; +use namada_core::types::masp::{ + BalanceOwner, ExtendedViewingKey, PaymentAddress, +}; +use namada_core::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada_core::types::token; +use namada_core::types::token::{ + Change, MaspDenom, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, +}; +use namada_core::types::transaction::{ + AffineCurve, EllipticCurve, PairingEngine, WrapperTx, +}; #[cfg(feature = "masp-tx-gen")] use rand_core::{CryptoRng, OsRng, RngCore}; use ripemd::Digest as RipemdDigest; @@ -58,26 +68,16 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; -use crate::ledger::queries::Client; -use crate::ledger::Namada; +use crate::args::InputAmount; +use crate::error::{EncodingError, Error, PinnedBalanceError, QueryError}; +use crate::io::Io; use crate::proto::Tx; -use crate::sdk::args::InputAmount; -use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; -use crate::sdk::rpc::{query_conversion, query_storage_value}; -use crate::sdk::tx::decode_component; -use crate::sdk::{args, rpc}; +use crate::queries::Client; +use crate::rpc::{query_conversion, query_storage_value}; use 
crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; -use crate::types::address::{masp, Address}; -use crate::types::io::Io; -use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; -use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; -use crate::types::token; -use crate::types::token::{ - Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, -}; -use crate::types::transaction::{EllipticCurve, PairingEngine, WrapperTx}; -use crate::{display_line, edisplay_line}; +use crate::tx::decode_component; +use crate::{args, display_line, edisplay_line, rpc, Namada}; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. diff --git a/shared/src/sdk/queries.rs b/sdk/src/queries/mod.rs similarity index 59% rename from shared/src/sdk/queries.rs rename to sdk/src/queries/mod.rs index a7cb9badb1..fdd5b042a8 100644 --- a/shared/src/sdk/queries.rs +++ b/sdk/src/queries/mod.rs @@ -1,7 +1,207 @@ -//! Query functionality related to the SDK -use std::fmt::{Debug, Display}; +//! Ledger read-only queries can be handled and dispatched via the [`RPC`] +//! defined via `router!` macro. +// Re-export to show in rustdoc! +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, DB}; +use namada_core::ledger::storage_api; use namada_core::types::storage::BlockHeight; +pub use shell::Shell; +use shell::SHELL; +pub use types::{ + EncodedResponseQuery, Error, RequestCtx, RequestQuery, ResponseQuery, + Router, +}; +use vp::{Vp, VP}; + +pub use self::shell::eth_bridge::{ + Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, + TransferToErcArgs, +}; + +#[macro_use] +mod router; +mod shell; +mod types; +pub mod vp; + +// Most commonly expected patterns should be declared first +router! 
{RPC, + // Shell provides storage read access, block metadata and can dry-run a tx + ( "shell" ) = (sub SHELL), + + // Validity-predicate's specific storage queries + ( "vp" ) = (sub VP), +} + +/// Handle RPC query request in the ledger. On success, returns response with +/// borsh-encoded data. +pub fn handle_path( + ctx: RequestCtx<'_, D, H, V, T>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + RPC.handle(ctx, request) +} + +// Handler helpers: + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. +pub fn require_latest_height( + ctx: &RequestCtx<'_, D, H, V, T>, + request: &RequestQuery, +) -> storage_api::Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if request.height != BlockHeight(0) + && request.height != ctx.wl_storage.storage.get_last_block_height() + { + return Err(storage_api::Error::new_const( + "This query doesn't support arbitrary block heights, only the \ + latest committed block height ('0' can be used as a special \ + value that means the latest block height)", + )); + } + Ok(()) +} + +/// For queries that do not support proofs, check that proof is not requested, +/// otherwise return an error. +pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { + if request.prove { + return Err(storage_api::Error::new_const( + "This query doesn't support proofs", + )); + } + Ok(()) +} + +/// For queries that don't use request data, require that there are no data +/// attached. 
+pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { + if !request.data.is_empty() { + return Err(storage_api::Error::new_const( + "This query doesn't accept request data", + )); + } + Ok(()) +} + +/// Queries testing helpers +#[cfg(any(test, feature = "testing"))] +mod testing { + + use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::types::storage::BlockHeight; + use tendermint_rpc::Response; + + use super::*; + use crate::events::log::EventLog; + use crate::tendermint_rpc::error::Error as RpcError; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub wl_storage: TestWlStorage, + /// event log + pub event_log: EventLog, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let mut wl_storage = TestWlStorage::default(); + + // Initialize mock gas limit + let max_block_gas_key = + namada_core::ledger::parameters::storage::get_max_block_gas_key( + ); + wl_storage + .storage + .write( + &max_block_gas_key, + namada_core::ledger::storage::types::encode( + &20_000_000_u64, + ), + ) + .expect( + "Max block gas parameter must be initialized in storage", + ); + let event_log = EventLog::default(); + Self { + rpc, + wl_storage, + event_log, + } + } + } + + #[cfg_attr(feature = "async-send", async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let 
request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + wl_storage: &self.wl_storage, + event_log: &self.event_log, + vp_wasm_cache: (), + tx_wasm_cache: (), + storage_read_past_height_limit: None, + }; + // TODO: this is a hack to propagate errors to the caller, we should + // really permit error types other than [`std::io::Error`] + self.rpc.handle(ctx, &request).map_err(|err| { + std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) + }) + } + + async fn perform(&self, _request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + Response::from_string("TODO") + } + } +} + +use std::fmt::{Debug, Display}; + use tendermint_rpc::endpoint::{ abci_info, block, block_results, blockchain, commit, consensus_params, consensus_state, health, net_info, status, @@ -9,7 +209,6 @@ use tendermint_rpc::endpoint::{ use tendermint_rpc::query::Query; use tendermint_rpc::{Error as RpcError, Order}; -use crate::ledger::queries::{EncodedResponseQuery, Error}; use crate::tendermint::block::Height; /// A client with async request dispatcher method, which can be used to invoke diff --git a/shared/src/ledger/queries/router.rs b/sdk/src/queries/router.rs similarity index 92% rename from shared/src/ledger/queries/router.rs rename to sdk/src/queries/router.rs index 799a34e5bd..9783d21309 100644 --- a/shared/src/ledger/queries/router.rs +++ b/sdk/src/queries/router.rs @@ -82,16 +82,16 @@ macro_rules! 
handle_match { break } // Check that the request is not sent with unsupported non-default - $crate::ledger::queries::require_latest_height(&$ctx, $request)?; - $crate::ledger::queries::require_no_proof($request)?; - $crate::ledger::queries::require_no_data($request)?; + $crate::queries::require_latest_height(&$ctx, $request)?; + $crate::queries::require_no_proof($request)?; + $crate::queries::require_no_data($request)?; // If you get a compile error from here with `expected function, found // queries::Storage`, you're probably missing the marker `(sub _)` let data = $handle($ctx, $( $matched_args ),* )?; // Encode the returned data with borsh let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; - return Ok($crate::ledger::queries::EncodedResponseQuery { + return Ok($crate::queries::EncodedResponseQuery { data, info: Default::default(), proof: None, @@ -401,22 +401,22 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `storage_value`."] pub async fn storage_value(&self, client: &CLIENT, data: Option>, - height: Option<$crate::types::storage::BlockHeight>, + height: Option, prove: bool, $( $param: &$param_ty ),* ) -> std::result::Result< - $crate::ledger::queries::ResponseQuery>, - ::Error + $crate::queries::ResponseQuery>, + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.storage_value_path( $( $param ),* ); - let $crate::ledger::queries::ResponseQuery { + let $crate::queries::ResponseQuery { data, info, proof } = client.request(path, data, height, prove).await?; - Ok($crate::ledger::queries::ResponseQuery { + Ok($crate::queries::ResponseQuery { data, info, proof, @@ -453,25 +453,25 @@ macro_rules! 
pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `" $handle "`."] pub async fn $handle(&self, client: &CLIENT, data: Option>, - height: Option<$crate::types::storage::BlockHeight>, + height: Option, prove: bool, $( $param: &$param_ty ),* ) -> std::result::Result< - $crate::ledger::queries::ResponseQuery<$return_type>, - ::Error + $crate::queries::ResponseQuery<$return_type>, + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); - let $crate::ledger::queries::ResponseQuery { + let $crate::queries::ResponseQuery { data, info, proof } = client.request(path, data, height, prove).await?; let decoded: $return_type = borsh::BorshDeserialize::try_from_slice(&data[..])?; - Ok($crate::ledger::queries::ResponseQuery { + Ok($crate::queries::ResponseQuery { data: decoded, info, proof, @@ -510,9 +510,9 @@ macro_rules! pattern_and_handler_to_method { ) -> std::result::Result< $return_type, - ::Error + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); let data = client.simple_request(path).await?; @@ -783,25 +783,25 @@ macro_rules! router { router_type!{[<$name:camel>] {}, $( $pattern $( -> $return_type )? 
= $handle ),* } - impl $crate::ledger::queries::Router for [<$name:camel>] { + impl $crate::queries::Router for [<$name:camel>] { // TODO: for some patterns, there's unused assignment of `$end` #[allow(unused_assignments)] - fn internal_handle( + fn internal_handle( &self, - ctx: $crate::ledger::queries::RequestCtx<'_, D, H>, - request: &$crate::ledger::queries::RequestQuery, + ctx: $crate::queries::RequestCtx<'_, D, H, V, T>, + request: &$crate::queries::RequestQuery, start: usize - ) -> $crate::ledger::storage_api::Result<$crate::ledger::queries::EncodedResponseQuery> + ) -> namada_core::ledger::storage_api::Result<$crate::queries::EncodedResponseQuery> where - D: 'static + $crate::ledger::storage::DB + for<'iter> $crate::ledger::storage::DBIter<'iter> + Sync, - H: 'static + $crate::ledger::storage::StorageHasher + Sync, + D: 'static + namada_core::ledger::storage::DB + for<'iter> namada_core::ledger::storage::DBIter<'iter> + Sync, + H: 'static + namada_core::ledger::storage::StorageHasher + Sync, { // Import for `.into_storage_result()` - use $crate::ledger::storage_api::ResultExt; + use namada_core::ledger::storage_api::ResultExt; // Import helper from this crate used inside the macros - use $crate::ledger::queries::router::find_next_slash_index; + use $crate::queries::router::find_next_slash_index; $( // This loop never repeats, it's only used for a breaking @@ -816,7 +816,7 @@ macro_rules! router { )* return Err( - $crate::ledger::queries::router::Error::WrongPath(request.path.clone())) + $crate::queries::router::Error::WrongPath(request.path.clone())) .into_storage_result(); } } @@ -835,14 +835,14 @@ macro_rules! 
router { #[cfg(test)] mod test_rpc_handlers { use borsh::BorshSerialize; + use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; + use namada_core::ledger::storage_api::{self, ResultExt}; + use namada_core::types::storage::Epoch; + use namada_core::types::token; - use crate::ledger::queries::{ + use crate::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, }; - use crate::ledger::storage::{DBIter, StorageHasher, DB}; - use crate::ledger::storage_api::{self, ResultExt}; - use crate::types::storage::Epoch; - use crate::types::token; /// A little macro to generate boilerplate for RPC handler functions. /// These are implemented to return their name as a String, joined by @@ -854,8 +854,8 @@ mod test_rpc_handlers { // optional trailing comma $(,)? ) => { $( - pub fn $name( - _ctx: RequestCtx<'_, D, H>, + pub fn $name( + _ctx: RequestCtx<'_, D, H, V, T>, $( $( $param: $param_ty ),* )? ) -> storage_api::Result where @@ -901,8 +901,8 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support optional args. - pub fn b3iii( - _ctx: RequestCtx<'_, D, H>, + pub fn b3iii( + _ctx: RequestCtx<'_, D, H, V, T>, a1: token::DenominatedAmount, a2: token::DenominatedAmount, a3: Option, @@ -920,8 +920,8 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support optional args. - pub fn b3iiii( - _ctx: RequestCtx<'_, D, H>, + pub fn b3iiii( + _ctx: RequestCtx<'_, D, H, V, T>, a1: token::DenominatedAmount, a2: token::DenominatedAmount, a3: Option, @@ -941,8 +941,8 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support handlers with `with_options`. 
- pub fn c( - _ctx: RequestCtx<'_, D, H>, + pub fn c( + _ctx: RequestCtx<'_, D, H, V, T>, _request: &RequestQuery, ) -> storage_api::Result where @@ -963,9 +963,10 @@ mod test_rpc_handlers { /// ``` #[cfg(test)] mod test_rpc { + use namada_core::types::storage::Epoch; + use namada_core::types::token; + use super::test_rpc_handlers::*; - use crate::types::storage::Epoch; - use crate::types::token; // Setup an RPC router for testing router! {TEST_RPC, @@ -1000,14 +1001,14 @@ mod test_rpc { #[cfg(test)] mod test { + use namada_core::ledger::storage_api; + use namada_core::types::storage::Epoch; + use namada_core::types::token; use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; use super::test_rpc::TEST_RPC; - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::{RequestCtx, RequestQuery, Router}; - use crate::ledger::storage_api; - use crate::types::storage::Epoch; - use crate::types::token; + use crate::queries::testing::TestClient; + use crate::queries::{RequestCtx, RequestQuery, Router}; /// Test all the possible paths in `TEST_RPC` router. 
#[tokio::test] @@ -1022,8 +1023,8 @@ mod test { let ctx = RequestCtx { event_log: &client.event_log, wl_storage: &client.wl_storage, - vp_wasm_cache: client.vp_wasm_cache.clone(), - tx_wasm_cache: client.tx_wasm_cache.clone(), + vp_wasm_cache: (), + tx_wasm_cache: (), storage_read_past_height_limit: None, }; let result = TEST_RPC.handle(ctx, &request); diff --git a/shared/src/ledger/queries/shell.rs b/sdk/src/queries/shell.rs similarity index 58% rename from shared/src/ledger/queries/shell.rs rename to sdk/src/queries/shell.rs index a9f272839f..22ba39feda 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/sdk/src/queries/shell.rs @@ -4,27 +4,27 @@ use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; -use namada_core::ledger::storage::LastBlock; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, LastBlock, DB}; +use namada_core::ledger::storage_api::{self, ResultExt, StorageRead}; use namada_core::types::account::{Account, AccountPublicKeysMap}; use namada_core::types::address::Address; use namada_core::types::hash::Hash; -use namada_core::types::storage::{BlockHeight, BlockResults, KeySeg}; +use namada_core::types::storage::{ + self, BlockHeight, BlockResults, Epoch, KeySeg, PrefixValue, +}; use namada_core::types::token::MaspDenom; +#[cfg(any(test, feature = "async-client"))] +use namada_core::types::transaction::TxResult; use self::eth_bridge::{EthBridge, ETH_BRIDGE}; +use crate::events::log::dumb_queries; +use crate::events::{Event, EventType}; use crate::ibc::core::ics04_channel::packet::Sequence; use crate::ibc::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; -use crate::ledger::events::log::dumb_queries; -use crate::ledger::events::{Event, EventType}; -use crate::ledger::queries::types::{RequestCtx, RequestQuery}; -use crate::ledger::queries::{require_latest_height, 
EncodedResponseQuery}; -use crate::ledger::storage::traits::StorageHasher; -use crate::ledger::storage::{DBIter, DB}; -use crate::ledger::storage_api::{self, ResultExt, StorageRead}; +use crate::queries::types::{RequestCtx, RequestQuery}; +use crate::queries::{require_latest_height, EncodedResponseQuery}; use crate::tendermint::merkle::proof::Proof; -use crate::types::storage::{self, Epoch, PrefixValue}; -#[cfg(any(test, feature = "async-client"))] -use crate::types::transaction::TxResult; type Conversion = ( Address, @@ -94,114 +94,20 @@ router! {SHELL, // Handlers: -#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] -fn dry_run_tx( - mut ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, +fn dry_run_tx( + _ctx: RequestCtx<'_, D, H, V, T>, + _request: &RequestQuery, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - use namada_core::ledger::gas::{Gas, GasMetering, TxGasMeter}; - use namada_core::ledger::storage::TempWlStorage; - use namada_core::types::transaction::DecryptedTx; - - use crate::ledger::protocol::{self, ShellParams}; - use crate::proto::Tx; - use crate::types::storage::TxIndex; - use crate::types::transaction::wrapper::wrapper_tx::PairingEngine; - use crate::types::transaction::{AffineCurve, EllipticCurve, TxType}; - - let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; - tx.validate_tx().into_storage_result()?; - - let mut temp_wl_storage = TempWlStorage::new(&ctx.wl_storage.storage); - let mut cumulated_gas = Gas::default(); - - // Wrapper dry run to allow estimating the gas cost of a transaction - let mut tx_gas_meter = match tx.header().tx_type { - TxType::Wrapper(wrapper) => { - let mut tx_gas_meter = - TxGasMeter::new(wrapper.gas_limit.to_owned()); - protocol::apply_wrapper_tx( - &wrapper, - None, - &request.data, - ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, - ), - None, - ) 
- .into_storage_result()?; - - temp_wl_storage.write_log.commit_tx(); - cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); - - // NOTE: the encryption key for a dry-run should always be an - // hardcoded, dummy one - let _privkey = - ::G2Affine::prime_subgroup_generator(); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) - } - TxType::Protocol(_) | TxType::Decrypted(_) => { - // If dry run only the inner tx, use the max block gas as the gas - // limit - TxGasMeter::new( - namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) - .unwrap() - .into(), - ) - } - TxType::Raw => { - // Cast tx to a decrypted for execution - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - - // If dry run only the inner tx, use the max block gas as the gas - // limit - TxGasMeter::new( - namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) - .unwrap() - .into(), - ) - } - }; - - let mut data = protocol::apply_wasm_tx( - tx, - &TxIndex(0), - ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, - ), - ) - .into_storage_result()?; - cumulated_gas = cumulated_gas - .checked_add(tx_gas_meter.get_tx_consumed_gas()) - .ok_or(namada_core::ledger::storage_api::Error::SimpleMessage( - "Overflow in gas", - ))?; - // Account gas for both inner and wrapper (if available) - data.gas_used = cumulated_gas; - // NOTE: the keys changed by the wrapper transaction (if any) are not - // returned from this function - let data = data.try_to_vec().into_storage_result()?; - Ok(EncodedResponseQuery { - data, - proof: None, - info: Default::default(), - }) + unimplemented!("Dry running tx requires \"wasm-runtime\" feature.") } /// Query to read block results from storage -pub fn read_results( - ctx: RequestCtx<'_, D, H>, +pub fn read_results( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> 
DBIter<'iter> + Sync, @@ -237,8 +143,8 @@ where } /// Query to read a conversion from storage -fn read_conversion( - ctx: RequestCtx<'_, D, H>, +fn read_conversion( + ctx: RequestCtx<'_, D, H, V, T>, asset_type: AssetType, ) -> storage_api::Result where @@ -270,19 +176,9 @@ where } } -#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] -fn dry_run_tx( - _ctx: RequestCtx<'_, D, H>, - _request: &RequestQuery, -) -> storage_api::Result -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - unimplemented!("Dry running tx requires \"wasm-runtime\" feature.") -} - -fn epoch(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +fn epoch( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -291,7 +187,9 @@ where Ok(data) } -fn native_token(ctx: RequestCtx<'_, D, H>) -> storage_api::Result
+fn native_token( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result
where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -300,8 +198,8 @@ where Ok(data) } -fn epoch_at_height( - ctx: RequestCtx<'_, D, H>, +fn epoch_at_height( + ctx: RequestCtx<'_, D, H, V, T>, height: BlockHeight, ) -> storage_api::Result> where @@ -311,8 +209,8 @@ where Ok(ctx.wl_storage.storage.block.pred_epochs.get_epoch(height)) } -fn last_block( - ctx: RequestCtx<'_, D, H>, +fn last_block( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -325,8 +223,8 @@ where /// borsh-encoded types, it is safe to check `data.is_empty()` to see if the /// value was found, except for unit - see `fn query_storage_value` in /// `apps/src/lib/client/rpc.rs` for unit type handling via `storage_has_key`. -fn storage_value( - ctx: RequestCtx<'_, D, H>, +fn storage_value( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, storage_key: storage::Key, ) -> storage_api::Result @@ -392,8 +290,8 @@ where } } -fn storage_prefix( - ctx: RequestCtx<'_, D, H>, +fn storage_prefix( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, storage_key: storage::Key, ) -> storage_api::Result @@ -435,8 +333,8 @@ where }) } -fn storage_has_key( - ctx: RequestCtx<'_, D, H>, +fn storage_has_key( + ctx: RequestCtx<'_, D, H, V, T>, storage_key: storage::Key, ) -> storage_api::Result where @@ -447,8 +345,8 @@ where Ok(data) } -fn accepted( - ctx: RequestCtx<'_, D, H>, +fn accepted( + ctx: RequestCtx<'_, D, H, V, T>, tx_hash: Hash, ) -> storage_api::Result> where @@ -464,8 +362,8 @@ where .cloned()) } -fn applied( - ctx: RequestCtx<'_, D, H>, +fn applied( + ctx: RequestCtx<'_, D, H, V, T>, tx_hash: Hash, ) -> storage_api::Result> where @@ -481,8 +379,8 @@ where .cloned()) } -fn ibc_client_update( - ctx: RequestCtx<'_, D, H>, +fn ibc_client_update( + ctx: RequestCtx<'_, D, H, V, T>, client_id: ClientId, consensus_height: BlockHeight, ) -> storage_api::Result> @@ -502,8 +400,8 @@ where 
.cloned()) } -fn ibc_packet( - ctx: RequestCtx<'_, D, H>, +fn ibc_packet( + ctx: RequestCtx<'_, D, H, V, T>, event_type: EventType, source_port: PortId, source_channel: ChannelId, @@ -531,8 +429,8 @@ where .cloned()) } -fn account( - ctx: RequestCtx<'_, D, H>, +fn account( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result> where @@ -557,8 +455,8 @@ where } } -fn revealed( - ctx: RequestCtx<'_, D, H>, +fn revealed( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result where @@ -573,18 +471,9 @@ where #[cfg(test)] mod test { - use borsh::{BorshDeserialize, BorshSerialize}; - use namada_test_utils::TestWasms; - - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::RPC; - use crate::ledger::storage_api::{self, StorageWrite}; - use crate::proto::{Code, Data, Tx}; - use crate::types::hash::Hash; - use crate::types::storage::Key; - use crate::types::transaction::decrypted::DecryptedTx; - use crate::types::transaction::TxType; - use crate::types::{address, token}; + use namada_core::types::{address, token}; + + use crate::queries::RPC; #[test] fn test_shell_queries_router_paths() { @@ -606,106 +495,4 @@ mod test { let path = RPC.shell().storage_has_key_path(&key); assert_eq!(format!("/shell/has_key/{}", key), path); } - - #[tokio::test] - async fn test_shell_queries_router_with_client() -> storage_api::Result<()> - { - // Initialize the `TestClient` - let mut client = TestClient::new(RPC); - // store the wasm code - let tx_no_op = TestWasms::TxNoOp.read_bytes(); - let tx_hash = Hash::sha256(&tx_no_op); - let key = Key::wasm_code(&tx_hash); - let len_key = Key::wasm_code_len(&tx_hash); - client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); - client - .wl_storage - .storage - .write(&len_key, (tx_no_op.len() as u64).try_to_vec().unwrap()) - .unwrap(); - - // Request last committed epoch - let read_epoch = RPC.shell().epoch(&client).await.unwrap(); - let current_epoch = 
client.wl_storage.storage.last_epoch; - assert_eq!(current_epoch, read_epoch); - - // Request dry run tx - let mut outer_tx = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); - outer_tx.set_code(Code::from_hash(tx_hash)); - outer_tx.set_data(Data::new(vec![])); - let tx_bytes = outer_tx.to_bytes(); - let result = RPC - .shell() - .dry_run_tx(&client, Some(tx_bytes), None, false) - .await - .unwrap(); - assert!(result.data.is_accepted()); - - // Request storage value for a balance key ... - let token_addr = address::testing::established_address_1(); - let owner = address::testing::established_address_2(); - let balance_key = token::balance_key(&token_addr, &owner); - // ... there should be no value yet. - let read_balance = RPC - .shell() - .storage_value(&client, None, None, false, &balance_key) - .await - .unwrap(); - assert!(read_balance.data.is_empty()); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = RPC - .shell() - .storage_prefix(&client, None, None, false, &balance_prefix) - .await - .unwrap(); - assert!(read_balances.data.is_empty()); - - // Request storage has key - let has_balance_key = RPC - .shell() - .storage_has_key(&client, &balance_key) - .await - .unwrap(); - assert!(!has_balance_key); - - // Then write some balance ... - let balance = token::Amount::native_whole(1000); - StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; - // It has to be committed to be visible in a query - client.wl_storage.commit_tx(); - client.wl_storage.commit_block().unwrap(); - // ... 
there should be the same value now - let read_balance = RPC - .shell() - .storage_value(&client, None, None, false, &balance_key) - .await - .unwrap(); - assert_eq!( - balance, - token::Amount::try_from_slice(&read_balance.data).unwrap() - ); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = RPC - .shell() - .storage_prefix(&client, None, None, false, &balance_prefix) - .await - .unwrap(); - assert_eq!(read_balances.data.len(), 1); - - // Request storage has key - let has_balance_key = RPC - .shell() - .storage_has_key(&client, &balance_key) - .await - .unwrap(); - assert!(has_balance_key); - - Ok(()) - } } diff --git a/shared/src/ledger/queries/shell/eth_bridge.rs b/sdk/src/queries/shell/eth_bridge.rs similarity index 96% rename from shared/src/ledger/queries/shell/eth_bridge.rs rename to sdk/src/queries/shell/eth_bridge.rs index 0bbc0aa679..6baf649992 100644 --- a/shared/src/ledger/queries/shell/eth_bridge.rs +++ b/sdk/src/queries/shell/eth_bridge.rs @@ -12,12 +12,17 @@ use namada_core::ledger::storage_api::{ self, CustomError, ResultExt, StorageRead, }; use namada_core::types::address::Address; -use namada_core::types::eth_bridge_pool::PendingTransferAppendix; +use namada_core::types::eth_abi::{Encode, EncodeCell}; +use namada_core::types::eth_bridge_pool::{ + PendingTransfer, PendingTransferAppendix, +}; use namada_core::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; use namada_core::types::ethereum_structs; -use namada_core::types::storage::{BlockHeight, DbKeySeg, Key}; +use namada_core::types::keccak::KeccakHash; +use namada_core::types::storage::MembershipProof::BridgePool; +use namada_core::types::storage::{BlockHeight, DbKeySeg, Epoch, Key}; use namada_core::types::token::Amount; use namada_core::types::vote_extensions::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, @@ -36,12 +41,7 @@ use namada_ethereum_bridge::storage::{ use 
namada_proof_of_stake::pos_queries::PosQueries; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::ledger::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; -use crate::types::eth_abi::{Encode, EncodeCell}; -use crate::types::eth_bridge_pool::PendingTransfer; -use crate::types::keccak::KeccakHash; -use crate::types::storage::Epoch; -use crate::types::storage::MembershipProof::BridgePool; +use crate::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; /// Contains information about the flow control of some ERC20 /// wrapped asset. @@ -167,8 +167,8 @@ router! {ETH_BRIDGE, /// Read the total supply and respective cap of some wrapped /// ERC20 token in Namada. -fn get_erc20_flow_control( - ctx: RequestCtx<'_, D, H>, +fn get_erc20_flow_control( + ctx: RequestCtx<'_, D, H, V, T>, asset: EthAddress, ) -> storage_api::Result where @@ -191,9 +191,9 @@ where } /// Helper function to read a smart contract from storage. -fn read_contract( +fn read_contract( key: &Key, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, U>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -212,8 +212,8 @@ where /// Read the address and version of the Ethereum bridge's Bridge /// smart contract. #[inline] -fn read_bridge_contract( - ctx: RequestCtx<'_, D, H>, +fn read_bridge_contract( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -225,8 +225,8 @@ where /// Read the address of the Ethereum bridge's native ERC20 /// smart contract. #[inline] -fn read_native_erc20_contract( - ctx: RequestCtx<'_, D, H>, +fn read_native_erc20_contract( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -237,8 +237,8 @@ where /// Read the current contents of the Ethereum bridge /// pool. 
-fn read_ethereum_bridge_pool( - ctx: RequestCtx<'_, D, H>, +fn read_ethereum_bridge_pool( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -252,8 +252,8 @@ where /// Read the contents of the Ethereum bridge /// pool covered by the latest signed root. -fn read_signed_ethereum_bridge_pool( - ctx: RequestCtx<'_, D, H>, +fn read_signed_ethereum_bridge_pool( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -272,9 +272,9 @@ where } /// Read the Ethereum bridge pool contents at a specified height. -fn read_ethereum_bridge_pool_at_height( +fn read_ethereum_bridge_pool_at_height( height: BlockHeight, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, ) -> Vec where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -311,8 +311,8 @@ where /// Generate a merkle proof for the inclusion of the /// requested transfers in the Ethereum bridge pool. -fn generate_bridge_pool_proof( - ctx: RequestCtx<'_, D, H>, +fn generate_bridge_pool_proof( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, ) -> storage_api::Result where @@ -444,8 +444,8 @@ where /// Iterates over all ethereum events /// and returns the amount of voting power /// backing each `TransferToEthereum` event. -fn transfer_to_ethereum_progress( - ctx: RequestCtx<'_, D, H>, +fn transfer_to_ethereum_progress( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -518,8 +518,8 @@ where /// /// This method may fail if a complete proof (i.e. with more than /// 2/3 of the total voting power behind it) is not available yet. -fn read_valset_upd_proof( - ctx: RequestCtx<'_, D, H>, +fn read_valset_upd_proof( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result>> where @@ -568,8 +568,8 @@ where /// /// This method may fail if no set of validators exists yet, /// at that [`Epoch`]. 
-fn read_bridge_valset( - ctx: RequestCtx<'_, D, H>, +fn read_bridge_valset( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -598,8 +598,8 @@ where /// /// This method may fail if no set of validators exists yet, /// at that [`Epoch`]. -fn read_governance_valset( - ctx: RequestCtx<'_, D, H>, +fn read_governance_valset( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -626,8 +626,8 @@ where /// Retrieve the consensus validator voting powers at the /// given [`BlockHeight`]. -fn voting_powers_at_height( - ctx: RequestCtx<'_, D, H>, +fn voting_powers_at_height( + ctx: RequestCtx<'_, D, H, V, T>, height: BlockHeight, ) -> storage_api::Result where @@ -645,8 +645,8 @@ where /// Retrieve the consensus validator voting powers at the /// given [`Epoch`]. -fn voting_powers_at_epoch( - ctx: RequestCtx<'_, D, H>, +fn voting_powers_at_epoch( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -678,7 +678,13 @@ mod test_ethbridge_router { use namada_core::ledger::eth_bridge::storage::whitelist; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; use namada_core::ledger::storage_api::StorageWrite; + use namada_core::types::address::nam; use namada_core::types::address::testing::established_address_1; + use namada_core::types::eth_abi::Encode; + use namada_core::types::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, + }; + use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage::BlockHeight; use namada_core::types::vote_extensions::validator_set_update; use namada_core::types::vote_extensions::validator_set_update::{ @@ -693,14 +699,8 @@ mod test_ethbridge_router { use super::test_utils::bertha_address; use super::*; - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::RPC; - use crate::types::address::nam; - use crate::types::eth_abi::Encode; - use 
crate::types::eth_bridge_pool::{ - GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, - }; - use crate::types::ethereum_events::EthAddress; + use crate::queries::testing::TestClient; + use crate::queries::RPC; /// Test that reading the bridge validator set works. #[tokio::test] diff --git a/shared/src/ledger/queries/types.rs b/sdk/src/queries/types.rs similarity index 88% rename from shared/src/ledger/queries/types.rs rename to sdk/src/queries/types.rs index 235bf76e99..7283982099 100644 --- a/shared/src/ledger/queries/types.rs +++ b/sdk/src/queries/types.rs @@ -1,21 +1,16 @@ use std::fmt::Debug; -use namada_core::ledger::storage::WlStorage; +use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::ledger::storage_api; +use namada_core::types::storage::BlockHeight; use thiserror::Error; -use crate::ledger::events::log::EventLog; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::events::log::EventLog; use crate::tendermint::merkle::proof::Proof; -use crate::types::storage::BlockHeight; -#[cfg(feature = "wasm-runtime")] -use crate::vm::wasm::{TxCache, VpCache}; -#[cfg(feature = "wasm-runtime")] -use crate::vm::WasmCacheRoAccess; /// A request context provides read-only access to storage and WASM compilation /// caches to request handlers. #[derive(Debug, Clone)] -pub struct RequestCtx<'shell, D, H> +pub struct RequestCtx<'shell, D, H, VpCache, TxCache> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -25,11 +20,9 @@ where /// Log of events emitted by `FinalizeBlock` ABCI calls. pub event_log: &'shell EventLog, /// Cache of VP wasm compiled artifacts. - #[cfg(feature = "wasm-runtime")] - pub vp_wasm_cache: VpCache, + pub vp_wasm_cache: VpCache, /// Cache of transaction wasm compiled artifacts. 
- #[cfg(feature = "wasm-runtime")] - pub tx_wasm_cache: TxCache, + pub tx_wasm_cache: TxCache, /// Taken from config `storage_read_past_height_limit`. When set, will /// limit the how many block heights in the past can the storage be /// queried for reading values. @@ -41,9 +34,9 @@ where pub trait Router { /// Handle a given request using the provided context. This must be invoked /// on the root `Router` to be able to match the `request.path` fully. - fn handle( + fn handle( &self, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, ) -> storage_api::Result where @@ -59,9 +52,9 @@ pub trait Router { /// Handle a given request using the provided context, starting to /// try to match `request.path` against the `Router`'s patterns at the /// given `start` offset. - fn internal_handle( + fn internal_handle( &self, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, start: usize, ) -> storage_api::Result diff --git a/shared/src/ledger/queries/vp/governance.rs b/sdk/src/queries/vp/governance.rs similarity index 75% rename from shared/src/ledger/queries/vp/governance.rs rename to sdk/src/queries/vp/governance.rs index 92c3495f24..1e3a5a8ece 100644 --- a/shared/src/ledger/queries/vp/governance.rs +++ b/sdk/src/queries/vp/governance.rs @@ -1,12 +1,12 @@ // cd shared && cargo expand ledger::queries::vp::governance +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; -use crate::core::ledger::governance::parameters::GovernanceParameters; -use crate::ledger::queries::types::RequestCtx; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::queries::types::RequestCtx; // Governance queries router! 
{GOV, @@ -16,8 +16,8 @@ router! {GOV, } /// Find if the given address belongs to a validator account. -fn proposal_id( - ctx: RequestCtx<'_, D, H>, +fn proposal_id( + ctx: RequestCtx<'_, D, H, V, T>, id: u64, ) -> storage_api::Result> where @@ -28,8 +28,8 @@ where } /// Find if the given address belongs to a validator account. -fn proposal_id_votes( - ctx: RequestCtx<'_, D, H>, +fn proposal_id_votes( + ctx: RequestCtx<'_, D, H, V, T>, id: u64, ) -> storage_api::Result> where @@ -40,8 +40,8 @@ where } /// Get the governane parameters -fn parameters( - ctx: RequestCtx<'_, D, H>, +fn parameters( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, diff --git a/shared/src/ledger/queries/vp/mod.rs b/sdk/src/queries/vp/mod.rs similarity index 100% rename from shared/src/ledger/queries/vp/mod.rs rename to sdk/src/queries/vp/mod.rs diff --git a/shared/src/ledger/queries/vp/pgf.rs b/sdk/src/queries/vp/pgf.rs similarity index 76% rename from shared/src/ledger/queries/vp/pgf.rs rename to sdk/src/queries/vp/pgf.rs index 8f5b14c91b..9e8ea2f5cc 100644 --- a/shared/src/ledger/queries/vp/pgf.rs +++ b/sdk/src/queries/vp/pgf.rs @@ -1,11 +1,11 @@ use namada_core::ledger::governance::storage::proposal::StoragePgfFunding; +use namada_core::ledger::pgf::parameters::PgfParameters; use namada_core::ledger::pgf::storage::steward::StewardDetail; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; use namada_core::types::address::Address; -use crate::core::ledger::pgf::parameters::PgfParameters; -use crate::ledger::queries::types::RequestCtx; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::queries::types::RequestCtx; // PoS validity predicate queries router! {PGF, @@ -16,8 +16,8 @@ router! 
{PGF, } /// Query the currect pgf steward set -fn stewards( - ctx: RequestCtx<'_, D, H>, +fn stewards( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -27,8 +27,8 @@ where } /// Check if an address is a pgf steward -fn is_steward( - ctx: RequestCtx<'_, D, H>, +fn is_steward( + ctx: RequestCtx<'_, D, H, V, T>, address: Address, ) -> storage_api::Result where @@ -39,8 +39,8 @@ where } /// Query the continous pgf fundings -fn funding( - ctx: RequestCtx<'_, D, H>, +fn funding( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -50,8 +50,8 @@ where } /// Query the PGF parameters -fn parameters( - ctx: RequestCtx<'_, D, H>, +fn parameters( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, diff --git a/shared/src/ledger/queries/vp/pos.rs b/sdk/src/queries/vp/pos.rs similarity index 90% rename from shared/src/ledger/queries/vp/pos.rs rename to sdk/src/queries/vp/pos.rs index e78bff146b..875779447d 100644 --- a/shared/src/ledger/queries/vp/pos.rs +++ b/sdk/src/queries/vp/pos.rs @@ -3,8 +3,13 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; use namada_core::ledger::storage_api::collections::lazy_map; use namada_core::ledger::storage_api::OptionExt; +use namada_core::types::address::Address; +use namada_core::types::storage::Epoch; +use namada_core::types::token; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionPair, @@ -21,12 +26,7 @@ use namada_proof_of_stake::{ validator_slashes_handle, validator_state_handle, }; -use crate::ledger::queries::types::RequestCtx; -use 
crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; -use crate::types::address::Address; -use crate::types::storage::Epoch; -use crate::types::token; +use crate::queries::types::RequestCtx; type AmountPair = (token::Amount, token::Amount); @@ -148,7 +148,9 @@ impl Enriched { // Handlers that implement the functions via `trait StorageRead`: /// Get the PoS parameters -fn pos_params(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +fn pos_params( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -157,8 +159,8 @@ where } /// Find if the given address belongs to a validator account. -fn is_validator( - ctx: RequestCtx<'_, D, H>, +fn is_validator( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, ) -> storage_api::Result where @@ -169,8 +171,8 @@ where } /// Find if the given address is a delegator -fn is_delegator( - ctx: RequestCtx<'_, D, H>, +fn is_delegator( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, epoch: Option, ) -> storage_api::Result @@ -183,8 +185,8 @@ where /// Get all the validator known addresses. These validators may be in any state, /// e.g. consensus, below-capacity, inactive or jailed. -fn validator_addresses( - ctx: RequestCtx<'_, D, H>, +fn validator_addresses( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -196,8 +198,8 @@ where } /// Get the validator commission rate and max commission rate change per epoch -fn validator_commission( - ctx: RequestCtx<'_, D, H>, +fn validator_commission( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -227,8 +229,8 @@ where } /// Get the validator state -fn validator_state( - ctx: RequestCtx<'_, D, H>, +fn validator_state( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -251,8 +253,8 @@ where /// to their address. 
/// Returns `None` when the given address is not a validator address. For a /// validator with `0` stake, this returns `Ok(token::Amount::zero())`. -fn validator_stake( - ctx: RequestCtx<'_, D, H>, +fn validator_stake( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -266,8 +268,8 @@ where } /// Get all the validator in the consensus set with their bonded stake. -fn consensus_validator_set( - ctx: RequestCtx<'_, D, H>, +fn consensus_validator_set( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -279,8 +281,8 @@ where } /// Get all the validator in the below-capacity set with their bonded stake. -fn below_capacity_validator_set( - ctx: RequestCtx<'_, D, H>, +fn below_capacity_validator_set( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -295,8 +297,8 @@ where } /// Get the total stake in PoS system at the given epoch or current when `None`. -fn total_stake( - ctx: RequestCtx<'_, D, H>, +fn total_stake( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result where @@ -308,8 +310,8 @@ where read_total_stake(ctx.wl_storage, ¶ms, epoch) } -fn bond_deltas( - ctx: RequestCtx<'_, D, H>, +fn bond_deltas( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, ) -> storage_api::Result> @@ -322,8 +324,8 @@ where /// Find the sum of bond amount up the given epoch when `Some`, or up to the /// pipeline length parameter offset otherwise -fn bond( - ctx: RequestCtx<'_, D, H>, +fn bond( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, @@ -343,8 +345,8 @@ where .ok_or_err_msg("Cannot find bond") } -fn bond_with_slashing( - ctx: RequestCtx<'_, D, H>, +fn bond_with_slashing( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, @@ -359,8 +361,8 @@ where bond_amount(ctx.wl_storage, &bond_id, epoch) } -fn unbond( - ctx: RequestCtx<'_, D, H>, +fn unbond( + 
ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, ) -> storage_api::Result> @@ -384,8 +386,8 @@ where .collect() } -fn unbond_with_slashing( - ctx: RequestCtx<'_, D, H>, +fn unbond_with_slashing( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, ) -> storage_api::Result> @@ -410,8 +412,8 @@ where .collect() } -fn withdrawable_tokens( - ctx: RequestCtx<'_, D, H>, +fn withdrawable_tokens( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, @@ -439,8 +441,8 @@ where Ok(total) } -fn bonds_and_unbonds( - ctx: RequestCtx<'_, D, H>, +fn bonds_and_unbonds( + ctx: RequestCtx<'_, D, H, V, T>, source: Option
, validator: Option
, ) -> storage_api::Result @@ -453,8 +455,8 @@ where /// Find all the validator addresses to whom the given `owner` address has /// some delegation in any epoch -fn delegation_validators( - ctx: RequestCtx<'_, D, H>, +fn delegation_validators( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result> where @@ -466,8 +468,8 @@ where /// Find all the validator addresses to whom the given `owner` address has /// some delegation in any epoch -fn delegations( - ctx: RequestCtx<'_, D, H>, +fn delegations( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, epoch: Option, ) -> storage_api::Result> @@ -480,8 +482,8 @@ where } /// Validator slashes -fn validator_slashes( - ctx: RequestCtx<'_, D, H>, +fn validator_slashes( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, ) -> storage_api::Result> where @@ -493,8 +495,8 @@ where } /// All slashes -fn slashes( - ctx: RequestCtx<'_, D, H>, +fn slashes( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -504,8 +506,8 @@ where } /// Enqueued slashes -fn enqueued_slashes( - ctx: RequestCtx<'_, D, H>, +fn enqueued_slashes( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result>>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -516,8 +518,8 @@ where } /// Native validator address by looking up the Tendermint address -fn validator_by_tm_addr( - ctx: RequestCtx<'_, D, H>, +fn validator_by_tm_addr( + ctx: RequestCtx<'_, D, H, V, T>, tm_addr: String, ) -> storage_api::Result> where @@ -531,8 +533,7 @@ where #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use super::*; - use crate::ledger::queries::RPC; - use crate::sdk::queries::Client; + use crate::queries::{Client, RPC}; impl Pos { /// Get bonds and unbonds with all details (slashes and rewards, if any) diff --git a/shared/src/ledger/queries/vp/token.rs b/sdk/src/queries/vp/token.rs similarity index 87% rename from 
shared/src/ledger/queries/vp/token.rs rename to sdk/src/queries/vp/token.rs index 3b99cb0fda..0a2a5df509 100644 --- a/shared/src/ledger/queries/vp/token.rs +++ b/sdk/src/queries/vp/token.rs @@ -6,7 +6,7 @@ use namada_core::ledger::storage_api::token::read_denom; use namada_core::types::address::Address; use namada_core::types::token; -use crate::ledger::queries::RequestCtx; +use crate::queries::RequestCtx; router! {TOKEN, ( "denomination" / [addr: Address] ) -> Option = denomination, @@ -14,8 +14,8 @@ router! {TOKEN, /// Get the number of decimal places (in base 10) for a /// token specified by `addr`. -fn denomination( - ctx: RequestCtx<'_, D, H>, +fn denomination( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, ) -> storage_api::Result> where @@ -28,12 +28,11 @@ where #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use borsh::BorshDeserialize; + use namada_core::types::address::Address; + use namada_core::types::token; use super::Token; - use crate::ledger::queries::RPC; - use crate::sdk::queries::Client; - use crate::types::address::Address; - use crate::types::token; + use crate::queries::{Client, RPC}; impl Token { /// Get the balance of the given `token` belonging to the given `owner`. 
diff --git a/shared/src/sdk/rpc.rs b/sdk/src/rpc.rs similarity index 89% rename from shared/src/sdk/rpc.rs rename to sdk/src/rpc.rs index e7da6dcbae..9307374663 100644 --- a/shared/src/sdk/rpc.rs +++ b/sdk/src/rpc.rs @@ -14,37 +14,35 @@ use namada_core::ledger::governance::utils::Vote; use namada_core::ledger::storage::LastBlock; use namada_core::types::account::Account; use namada_core::types::address::Address; -use namada_core::types::storage::Key; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::storage::{ + BlockHeight, BlockResults, Epoch, Key, PrefixValue, +}; use namada_core::types::token::{ Amount, DenominatedAmount, Denomination, MaspDenom, }; +use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorState, }; use serde::Serialize; -use crate::ledger::events::Event; -use crate::ledger::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use crate::ledger::queries::RPC; -use crate::ledger::Namada; +use crate::args::InputAmount; +use crate::control_flow::{time, Halt, TryHalt}; +use crate::error::{EncodingError, Error, QueryError}; +use crate::events::Event; +use crate::io::Io; use crate::proto::Tx; -use crate::sdk::args::InputAmount; -use crate::sdk::error; -use crate::sdk::error::{EncodingError, Error, QueryError}; -use crate::sdk::queries::Client; +use crate::queries::vp::pos::EnrichedBondsAndUnbondsDetails; +use crate::queries::{Client, RPC}; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::Proof; use crate::tendermint_rpc::error::Error as TError; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; -use crate::types::control_flow::{time, Halt, TryHalt}; -use crate::types::hash::Hash; -use crate::types::io::Io; -use crate::types::key::common; -use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; -use 
crate::types::{storage, token}; -use crate::{display_line, edisplay_line}; +use crate::{display_line, edisplay_line, error, Namada}; /// Query the status of a given transaction. /// @@ -97,14 +95,14 @@ pub async fn query_tx_status<'a>( } /// Query the epoch of the last committed block -pub async fn query_epoch( +pub async fn query_epoch( client: &C, ) -> Result { convert_response::(RPC.shell().epoch(client).await) } /// Query the address of the native token -pub async fn query_native_token( +pub async fn query_native_token( client: &C, ) -> Result { convert_response::(RPC.shell().native_token(client).await) @@ -113,7 +111,7 @@ pub async fn query_native_token( /// Query the epoch of the given block height, if it exists. /// Will return none if the input block height is greater than /// the latest committed block height. -pub async fn query_epoch_at_height( +pub async fn query_epoch_at_height( client: &C, height: BlockHeight, ) -> Result, error::Error> { @@ -121,7 +119,7 @@ pub async fn query_epoch_at_height( } /// Query the last committed block, if any. -pub async fn query_block( +pub async fn query_block( client: &C, ) -> Result, error::Error> { // NOTE: We're not using `client.latest_block()` because it may return an @@ -130,7 +128,7 @@ pub async fn query_block( } /// A helper to unwrap client's response. Will shut down process on error. -fn unwrap_client_response( +fn unwrap_client_response( response: Result, ) -> T { response.unwrap_or_else(|err| { @@ -141,21 +139,21 @@ fn unwrap_client_response( /// A helper to turn client's response into an error type that can be used with /// ? The exact error type is a `QueryError::NoResponse`, and thus should be /// seen as getting no response back from a query. 
-fn convert_response( +fn convert_response( response: Result, ) -> Result { response.map_err(|err| Error::from(QueryError::NoResponse(err.to_string()))) } /// Query the results of the last committed block -pub async fn query_results( +pub async fn query_results( client: &C, ) -> Result, Error> { convert_response::(RPC.shell().read_results(client).await) } /// Query token amount of owner. -pub async fn get_token_balance( +pub async fn get_token_balance( client: &C, token: &Address, owner: &Address, @@ -166,7 +164,7 @@ pub async fn get_token_balance( } /// Check if the given address is a known validator. -pub async fn is_validator( +pub async fn is_validator( client: &C, address: &Address, ) -> Result { @@ -174,7 +172,7 @@ pub async fn is_validator( } /// Check if the given address is a pgf steward. -pub async fn is_steward( +pub async fn is_steward( client: &C, address: &Address, ) -> bool { @@ -184,7 +182,7 @@ pub async fn is_steward( } /// Check if a given address is a known delegator -pub async fn is_delegator( +pub async fn is_delegator( client: &C, address: &Address, ) -> Result { @@ -194,7 +192,7 @@ pub async fn is_delegator( } /// Check if a given address is a known delegator at the given epoch -pub async fn is_delegator_at( +pub async fn is_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -210,7 +208,7 @@ pub async fn is_delegator_at( /// Check if the address exists on chain. Established address exists if it has a /// stored validity predicate. Implicit and internal addresses always return /// true. -pub async fn known_address( +pub async fn known_address( client: &C, address: &Address, ) -> Result { @@ -228,7 +226,7 @@ pub async fn known_address( // often ignore the optional value and do not have any error type surrounding // it. /// Query a conversion. 
-pub async fn query_conversion( +pub async fn query_conversion( client: &C, asset_type: AssetType, ) -> Option<( @@ -275,7 +273,7 @@ pub async fn query_storage_value( ) -> Result where T: BorshDeserialize, - C: crate::ledger::queries::Client + Sync, + C: crate::queries::Client + Sync, { // In case `T` is a unit (only thing that encodes to 0 bytes), we have to // use `storage_has_key` instead of `storage_value`, because `storage_value` @@ -306,9 +304,7 @@ where } /// Query a storage value and the proof without decoding. -pub async fn query_storage_value_bytes< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_storage_value_bytes( client: &C, key: &storage::Key, height: Option, @@ -365,7 +361,7 @@ where } /// Query to check if the given storage key exists. -pub async fn query_has_storage_key( +pub async fn query_has_storage_key( client: &C, key: &storage::Key, ) -> Result { @@ -415,13 +411,10 @@ impl<'a> From> for Query { /// Call the corresponding `tx_event_query` RPC method, to fetch /// the current status of a transation. 
-pub async fn query_tx_events( +pub async fn query_tx_events( client: &C, tx_event_query: TxEventQuery<'_>, -) -> std::result::Result< - Option, - ::Error, -> { +) -> std::result::Result, ::Error> { let tx_hash: Hash = tx_event_query.tx_hash().try_into().unwrap(); match tx_event_query { TxEventQuery::Accepted(_) => { @@ -557,7 +550,7 @@ impl TxResponse { /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` -pub async fn query_tx_response( +pub async fn query_tx_response( client: &C, tx_query: TxEventQuery<'_>, ) -> Result { @@ -627,14 +620,14 @@ pub async fn query_tx_response( } /// Get the PoS parameters -pub async fn get_pos_params( +pub async fn get_pos_params( client: &C, ) -> Result { convert_response::(RPC.vp().pos().pos_params(client).await) } /// Get all validators in the given epoch -pub async fn get_all_validators( +pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> Result, error::Error> { @@ -647,9 +640,7 @@ pub async fn get_all_validators( } /// Get the total staked tokens in the given epoch -pub async fn get_total_staked_tokens< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn get_total_staked_tokens( client: &C, epoch: Epoch, ) -> Result { @@ -659,7 +650,7 @@ pub async fn get_total_staked_tokens< } /// Get the given validator's stake at the given epoch -pub async fn get_validator_stake( +pub async fn get_validator_stake( client: &C, epoch: Epoch, validator: &Address, @@ -674,7 +665,7 @@ pub async fn get_validator_stake( } /// Query and return a validator's state -pub async fn get_validator_state( +pub async fn get_validator_state( client: &C, validator: &Address, epoch: Option, @@ -688,9 +679,7 @@ pub async fn get_validator_state( } /// Get the delegator's delegation -pub async fn get_delegators_delegation< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn get_delegators_delegation( client: &C, address: &Address, ) -> Result, 
error::Error> { @@ -700,9 +689,7 @@ pub async fn get_delegators_delegation< } /// Get the delegator's delegation at some epoh -pub async fn get_delegators_delegation_at< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn get_delegators_delegation_at( client: &C, address: &Address, epoch: Epoch, @@ -716,7 +703,7 @@ pub async fn get_delegators_delegation_at< } /// Query proposal by Id -pub async fn query_proposal_by_id( +pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, Error> { @@ -727,7 +714,7 @@ pub async fn query_proposal_by_id( /// Query and return validator's commission rate and max commission rate change /// per epoch -pub async fn query_commission_rate( +pub async fn query_commission_rate( client: &C, validator: &Address, epoch: Option, @@ -741,7 +728,7 @@ pub async fn query_commission_rate( } /// Query a validator's bonds for a given epoch -pub async fn query_bond( +pub async fn query_bond( client: &C, source: &Address, validator: &Address, @@ -753,7 +740,7 @@ pub async fn query_bond( } /// Query the accunt substorage space of an address -pub async fn get_account_info( +pub async fn get_account_info( client: &C, owner: &Address, ) -> Result, error::Error> { @@ -763,9 +750,7 @@ pub async fn get_account_info( } /// Query if the public_key is revealed -pub async fn is_public_key_revealed< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn is_public_key_revealed( client: &C, owner: &Address, ) -> Result { @@ -773,7 +758,7 @@ pub async fn is_public_key_revealed< } /// Query an account substorage at a specific index -pub async fn get_public_key_at( +pub async fn get_public_key_at( client: &C, owner: &Address, index: u8, @@ -830,9 +815,7 @@ pub async fn query_and_print_unbonds<'a>( } /// Query withdrawable tokens in a validator account for a given epoch -pub async fn query_withdrawable_tokens< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_withdrawable_tokens( client: &C, bond_source: &Address, 
validator: &Address, @@ -847,9 +830,7 @@ pub async fn query_withdrawable_tokens< } /// Query all unbonds for a validator, applying slashes -pub async fn query_unbond_with_slashing< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_unbond_with_slashing( client: &C, source: &Address, validator: &Address, @@ -863,16 +844,14 @@ pub async fn query_unbond_with_slashing< } /// Get the givernance parameters -pub async fn query_governance_parameters< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_governance_parameters( client: &C, ) -> GovernanceParameters { unwrap_client_response::(RPC.vp().gov().parameters(client).await) } /// Get the givernance parameters -pub async fn query_proposal_votes( +pub async fn query_proposal_votes( client: &C, proposal_id: u64, ) -> Result, error::Error> { @@ -882,7 +861,7 @@ pub async fn query_proposal_votes( } /// Get the bond amount at the given epoch -pub async fn get_bond_amount_at( +pub async fn get_bond_amount_at( client: &C, delegator: &Address, validator: &Address, @@ -899,7 +878,7 @@ pub async fn get_bond_amount_at( /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs. -pub async fn bonds_and_unbonds( +pub async fn bonds_and_unbonds( client: &C, source: &Option
, validator: &Option
, @@ -915,9 +894,7 @@ pub async fn bonds_and_unbonds( /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs, enriched with extra information calculated from /// the data. -pub async fn enriched_bonds_and_unbonds< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn enriched_bonds_and_unbonds( client: &C, current_epoch: Epoch, source: &Option
, diff --git a/shared/src/sdk/signing.rs b/sdk/src/signing.rs similarity index 98% rename from shared/src/sdk/signing.rs rename to sdk/src/signing.rs index 4edc5e4f41..7680adfdd2 100644 --- a/shared/src/sdk/signing.rs +++ b/sdk/src/signing.rs @@ -9,53 +9,50 @@ use masp_primitives::asset_type::AssetType; use masp_primitives::transaction::components::sapling::fees::{ InputView, OutputView, }; +use namada_core::ledger::parameters::storage as parameter_storage; use namada_core::proto::SignatureIndex; use namada_core::types::account::AccountPublicKeysMap; use namada_core::types::address::{ masp, masp_tx_key, Address, ImplicitAddress, }; +use namada_core::types::key::*; +use namada_core::types::masp::{ExtendedViewingKey, PaymentAddress}; +use namada_core::types::storage::Epoch; use namada_core::types::token; +use namada_core::types::token::Transfer; // use namada_core::types::storage::Key; use namada_core::types::token::{Amount, DenominatedAmount, MaspDenom}; -use namada_core::types::transaction::pos; +use namada_core::types::transaction::account::{InitAccount, UpdateAccount}; +use namada_core::types::transaction::governance::{ + InitProposalData, VoteProposalData, +}; +use namada_core::types::transaction::pos::InitValidator; +use namada_core::types::transaction::{pos, Fee}; use prost::Message; use serde::{Deserialize, Serialize}; use sha2::Digest; use zeroize::Zeroizing; use super::masp::{ShieldedContext, ShieldedTransfer}; -use crate::display_line; +use crate::args::SdkTypes; +use crate::error::{EncodingError, Error, TxError}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc_proto::google::protobuf::Any; -use crate::ledger::parameters::storage as parameter_storage; -use crate::ledger::Namada; +use crate::io::*; +use crate::masp::make_asset_type; use crate::proto::{MaspBuilder, Section, Tx}; -use crate::sdk::args::SdkTypes; -use crate::sdk::error::{EncodingError, Error, TxError}; -use crate::sdk::masp::make_asset_type; -use 
crate::sdk::rpc::{ +use crate::rpc::{ format_denominated_amount, query_wasm_code_hash, validate_amount, }; -use crate::sdk::tx::{ +use crate::tx::{ TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_INIT_VALIDATOR_WASM, TX_REVEAL_PK, TX_TRANSFER_WASM, TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, VP_USER_WASM, }; -pub use crate::sdk::wallet::store::AddressVpType; -use crate::sdk::wallet::{Wallet, WalletIo}; -use crate::sdk::{args, rpc}; -use crate::types::io::*; -use crate::types::key::*; -use crate::types::masp::{ExtendedViewingKey, PaymentAddress}; -use crate::types::storage::Epoch; -use crate::types::token::Transfer; -use crate::types::transaction::account::{InitAccount, UpdateAccount}; -use crate::types::transaction::governance::{ - InitProposalData, VoteProposalData, -}; -use crate::types::transaction::pos::InitValidator; -use crate::types::transaction::Fee; +pub use crate::wallet::store::AddressVpType; +use crate::wallet::{Wallet, WalletIo}; +use crate::{args, display_line, rpc, Namada}; #[cfg(feature = "std")] /// Env. 
var specifying where to store signing test vectors diff --git a/shared/src/sdk/tx.rs b/sdk/src/tx.rs similarity index 96% rename from shared/src/sdk/tx.rs rename to sdk/src/tx.rs index 66b8847b17..b4b059e249 100644 --- a/shared/src/sdk/tx.rs +++ b/sdk/src/tx.rs @@ -16,57 +16,56 @@ use masp_primitives::transaction::components::transparent::fees::{ InputView as TransparentInputView, OutputView as TransparentOutputView, }; use masp_primitives::transaction::components::I32Sum; +use namada_core::ibc::applications::transfer::msgs::transfer::MsgTransfer; +use namada_core::ibc::applications::transfer::packet::PacketData; +use namada_core::ibc::applications::transfer::PrefixedCoin; +use namada_core::ibc::core::ics04_channel::timeout::TimeoutHeight; +use namada_core::ibc::core::timestamp::Timestamp as IbcTimestamp; +use namada_core::ibc::core::Msg; +use namada_core::ibc::Height as IbcHeight; use namada_core::ledger::governance::cli::onchain::{ DefaultProposal, OnChainProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; +use namada_core::ledger::ibc::storage::ibc_denom_key; use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{masp, Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; +use namada_core::types::key::*; +use namada_core::types::masp::TransferTarget; +use namada_core::types::storage::Epoch; +use namada_core::types::time::DateTimeUtc; use namada_core::types::token::MaspDenom; +use namada_core::types::transaction::account::{InitAccount, UpdateAccount}; use namada_core::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada_core::types::transaction::pgf::UpdateStewardCommission; +use namada_core::types::transaction::{pos, TxType}; +use namada_core::types::{storage, token}; use 
namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; -use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; -use crate::ibc::applications::transfer::packet::PacketData; -use crate::ibc::applications::transfer::PrefixedCoin; -use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; -use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; -use crate::ibc::core::Msg; -use crate::ibc::Height as IbcHeight; -use crate::ledger::ibc::storage::ibc_denom_key; -use crate::ledger::Namada; +use crate::args::{self, InputAmount}; +use crate::control_flow::{time, ProceedOrElse}; +use crate::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::io::Io; +use crate::masp::TransferErr::Build; +use crate::masp::{ShieldedContext, ShieldedTransfer}; use crate::proto::{MaspBuilder, Tx}; -use crate::sdk::args::{self, InputAmount}; -use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; -use crate::sdk::masp::TransferErr::Build; -use crate::sdk::masp::{ShieldedContext, ShieldedTransfer}; -use crate::sdk::queries::Client; -use crate::sdk::rpc::{ +use crate::queries::Client; +use crate::rpc::{ self, format_denominated_amount, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, }; -use crate::sdk::signing::{self, SigningTxData, TxSourcePostBalance}; -use crate::sdk::wallet::WalletIo; +use crate::signing::{self, SigningTxData, TxSourcePostBalance}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; -use crate::types::control_flow::{time, ProceedOrElse}; -use crate::types::io::Io; -use crate::types::key::*; -use crate::types::masp::TransferTarget; -use crate::types::storage::Epoch; -use crate::types::time::DateTimeUtc; -use crate::types::transaction::account::{InitAccount, UpdateAccount}; -use crate::types::transaction::{pos, TxType}; -use crate::types::{storage, token}; -use 
crate::{display_line, edisplay_line, vm}; +use crate::wallet::WalletIo; +use crate::{display_line, edisplay_line, Namada}; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; @@ -235,20 +234,20 @@ pub async fn process_tx<'a>( } /// Check if a reveal public key transaction is needed -pub async fn is_reveal_pk_needed( +pub async fn is_reveal_pk_needed( client: &C, address: &Address, force: bool, ) -> Result where - C: crate::sdk::queries::Client + Sync, + C: crate::queries::Client + Sync, { // Check if PK revealed Ok(force || !has_revealed_pk(client, address).await?) } /// Check if the public key for the given address has been revealed -pub async fn has_revealed_pk( +pub async fn has_revealed_pk( client: &C, address: &Address, ) -> Result { @@ -759,7 +758,7 @@ pub async fn build_unjail_validator<'a>( } let last_slash_epoch_key = - crate::ledger::pos::validator_last_slash_key(validator); + namada_proof_of_stake::storage::validator_last_slash_key(validator); let last_slash_epoch = rpc::query_storage_value::<_, Epoch>( context.client(), &last_slash_epoch_key, @@ -1496,7 +1495,7 @@ pub async fn build_ibc_transfer<'a>( #[allow(clippy::too_many_arguments)] pub async fn build<'a, F, D>( context: &impl Namada<'a>, - tx_args: &crate::sdk::args::Tx, + tx_args: &crate::args::Tx, path: PathBuf, data: D, on_tx: F, @@ -1522,7 +1521,7 @@ where #[allow(clippy::too_many_arguments)] async fn build_pow_flag<'a, F, D>( context: &impl Namada<'a>, - tx_args: &crate::sdk::args::Tx, + tx_args: &crate::args::Tx, path: PathBuf, mut data: D, on_tx: F, @@ -2123,27 +2122,6 @@ async fn check_balance_too_low_err<'a, N: Namada<'a>>( } } -#[allow(dead_code)] -fn validate_untrusted_code_err( - io: &IO, - vp_code: &Vec, - force: bool, -) -> Result<()> { - if let Err(err) = vm::validate_untrusted_wasm(vp_code) { - if force { - edisplay_line!( - io, - "Validity predicate code validation failed with {}", - err - ); - Ok(()) - } else { - 
Err(Error::from(TxError::WasmValidationFailure(err))) - } - } else { - Ok(()) - } -} async fn query_wasm_code_hash_buf<'a>( context: &impl Namada<'a>, path: &Path, diff --git a/shared/src/sdk/wallet/alias.rs b/sdk/src/wallet/alias.rs similarity index 100% rename from shared/src/sdk/wallet/alias.rs rename to sdk/src/wallet/alias.rs diff --git a/shared/src/sdk/wallet/derivation_path.rs b/sdk/src/wallet/derivation_path.rs similarity index 98% rename from shared/src/sdk/wallet/derivation_path.rs rename to sdk/src/wallet/derivation_path.rs index 7f639161d2..7751e51701 100644 --- a/shared/src/sdk/wallet/derivation_path.rs +++ b/sdk/src/wallet/derivation_path.rs @@ -2,6 +2,7 @@ use core::fmt; use std::str::FromStr; use derivation_path::{ChildIndex, DerivationPath as DerivationPathInner}; +use namada_core::types::key::SchemeType; use thiserror::Error; use tiny_hderive::bip44::{ DerivationPath as HDeriveDerivationPath, @@ -9,8 +10,6 @@ use tiny_hderive::bip44::{ }; use tiny_hderive::Error as HDeriveError; -use crate::types::key::SchemeType; - const ETH_COIN_TYPE: u32 = 60; const NAMADA_COIN_TYPE: u32 = 877; @@ -114,8 +113,9 @@ impl IntoHDeriveDerivationPath for DerivationPath { #[cfg(test)] mod tests { + use namada_core::types::key::SchemeType; + use super::DerivationPath; - use crate::types::key::SchemeType; #[test] fn path_is_compatible() { diff --git a/shared/src/sdk/wallet/keys.rs b/sdk/src/wallet/keys.rs similarity index 99% rename from shared/src/sdk/wallet/keys.rs rename to sdk/src/wallet/keys.rs index 749fa1e25f..a8d267c898 100644 --- a/shared/src/sdk/wallet/keys.rs +++ b/sdk/src/wallet/keys.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::sdk::wallet::WalletIo; +use crate::wallet::WalletIo; const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; diff --git a/shared/src/sdk/wallet/mod.rs b/sdk/src/wallet/mod.rs similarity index 99% rename from 
shared/src/sdk/wallet/mod.rs rename to sdk/src/wallet/mod.rs index cadfdc718c..4ee3e13947 100644 --- a/shared/src/sdk/wallet/mod.rs +++ b/sdk/src/wallet/mod.rs @@ -13,6 +13,11 @@ use alias::Alias; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::types::address::Address; +use namada_core::types::key::*; +use namada_core::types::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +}; pub use pre_genesis::gen_key_to_store; use rand_core::RngCore; pub use store::{gen_sk_rng, AddressVpType, Store}; @@ -22,11 +27,6 @@ use zeroize::Zeroizing; use self::derivation_path::{DerivationPath, DerivationPathError}; pub use self::keys::{DecryptionError, StoredKeypair}; pub use self::store::{ConfirmationResponse, ValidatorData, ValidatorKeys}; -use crate::types::address::Address; -use crate::types::key::*; -use crate::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, -}; /// Errors of key generation / recovery #[derive(Error, Debug)] diff --git a/shared/src/sdk/wallet/pre_genesis.rs b/sdk/src/wallet/pre_genesis.rs similarity index 96% rename from shared/src/sdk/wallet/pre_genesis.rs rename to sdk/src/wallet/pre_genesis.rs index fd66dedbfe..916ec43781 100644 --- a/shared/src/sdk/wallet/pre_genesis.rs +++ b/sdk/src/wallet/pre_genesis.rs @@ -1,11 +1,11 @@ //! 
Provides functionality for managing validator keys +use namada_core::types::key::{common, SchemeType}; use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::sdk::wallet; -use crate::sdk::wallet::{store, StoredKeypair}; -use crate::types::key::{common, SchemeType}; +use crate::wallet; +use crate::wallet::{store, StoredKeypair}; /// Ways in which wallet store operations can fail #[derive(Error, Debug)] diff --git a/shared/src/sdk/wallet/store.rs b/sdk/src/wallet/store.rs similarity index 99% rename from shared/src/sdk/wallet/store.rs rename to sdk/src/wallet/store.rs index b674391127..201cc885a4 100644 --- a/shared/src/sdk/wallet/store.rs +++ b/sdk/src/wallet/store.rs @@ -8,6 +8,12 @@ use bimap::BiHashMap; use bip39::Seed; use itertools::Itertools; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::types::address::{Address, ImplicitAddress}; +use namada_core::types::key::dkg_session_keys::DkgKeypair; +use namada_core::types::key::*; +use namada_core::types::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +}; #[cfg(feature = "masp-tx-gen")] use rand_core::RngCore; use serde::{Deserialize, Serialize}; @@ -17,13 +23,7 @@ use zeroize::Zeroizing; use super::alias::{self, Alias}; use super::derivation_path::DerivationPath; use super::pre_genesis; -use crate::sdk::wallet::{StoredKeypair, WalletIo}; -use crate::types::address::{Address, ImplicitAddress}; -use crate::types::key::dkg_session_keys::DkgKeypair; -use crate::types::key::*; -use crate::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, -}; +use crate::wallet::{StoredKeypair, WalletIo}; /// Actions that can be taken when there is an alias conflict pub enum ConfirmationResponse { diff --git a/shared/Cargo.toml b/shared/Cargo.toml index f3c5428594..21eb023e18 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -24,6 +24,7 @@ std = ["fd-lock"] dev = [] ferveo-tpke = [ "namada_core/ferveo-tpke", + 
"namada_sdk/ferveo-tpke", ] wasm-runtime = [ "namada_core/wasm-runtime", @@ -41,6 +42,7 @@ wasm-runtime = [ # Enable queries support for an async client async-client = [ "async-trait", + "namada_sdk/async-client" ] # Requires async traits to be safe to send across threads @@ -50,6 +52,7 @@ async-send = [] tendermint-rpc = [ "async-client", "dep:tendermint-rpc", + "namada_sdk/tendermint-rpc", ] # tendermint-rpc HttpClient http-client = [ @@ -60,15 +63,18 @@ abciplus = [ "namada_core/abciplus", "namada_proof_of_stake/abciplus", "namada_ethereum_bridge/abciplus", + "namada_sdk/abciplus", ] ibc-mocks = [ "namada_core/ibc-mocks", + "namada_sdk/ibc-mocks", ] masp-tx-gen = [ "rand", "rand_core", + "namada_sdk/masp-tx-gen", ] # for integration tests and test utilies @@ -76,6 +82,7 @@ testing = [ "namada_core/testing", "namada_ethereum_bridge/testing", "namada_proof_of_stake/testing", + "namada_sdk/testing", "async-client", "proptest", "rand_core", @@ -87,13 +94,18 @@ namada-sdk = [ "tendermint-rpc", "masp-tx-gen", "ferveo-tpke", - "masp_primitives/transparent-inputs" + "masp_primitives/transparent-inputs", + "namada_sdk/namada-sdk", ] -multicore = ["masp_proofs/multicore"] +multicore = [ + "masp_proofs/multicore", + "namada_sdk/multicore", +] [dependencies] namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign"]} +namada_sdk = {path = "../sdk", default-features = false} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} async-trait = {version = "0.1.51", optional = true} diff --git a/shared/src/ledger/governance/utils.rs b/shared/src/ledger/governance/utils.rs index d4b4d1316c..a254556cce 100644 --- a/shared/src/ledger/governance/utils.rs +++ b/shared/src/ledger/governance/utils.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use namada_core::ledger::governance::utils::TallyResult; +use namada_sdk::events::{Event, EventLevel}; use 
thiserror::Error; use crate::ledger::events::EventType; @@ -34,6 +35,16 @@ pub struct ProposalEvent { pub attributes: HashMap, } +impl From for Event { + fn from(proposal_event: ProposalEvent) -> Self { + Self { + event_type: EventType::Proposal, + level: EventLevel::Block, + attributes: proposal_event.attributes, + } + } +} + impl ProposalEvent { /// Create a proposal event pub fn new( diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index aecde2d930..676d57190f 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -1,7 +1,6 @@ //! The ledger modules -pub mod eth_bridge; -pub mod events; +pub use namada_sdk::{eth_bridge, events}; pub mod governance; pub mod ibc; pub mod inflation; @@ -10,535 +9,367 @@ pub mod pgf; pub mod pos; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] pub mod protocol; -pub mod queries; +pub use namada_sdk::queries; pub mod storage; pub mod vp_host_fns; -use std::path::PathBuf; -use std::str::FromStr; - +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api::ResultExt; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; - -use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::proto::Tx; -use crate::sdk::args::{self, InputAmount, SdkTypes}; -use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; -use crate::sdk::rpc::query_native_token; -use crate::sdk::signing::{self, SigningTxData}; -use crate::sdk::tx::{ - self, ProcessTxResponse, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, - TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_PROPOSAL, - TX_INIT_VALIDATOR_WASM, TX_RESIGN_STEWARD, TX_REVEAL_PK, TX_TRANSFER_WASM, - TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, - TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, 
TX_WITHDRAW_WASM, - VP_USER_WASM, -}; -use crate::sdk::wallet::{Wallet, WalletIo, WalletStorage}; -use crate::types::address::Address; -use crate::types::io::Io; -use crate::types::key::*; -use crate::types::masp::{TransferSource, TransferTarget}; -use crate::types::token; -use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; -use crate::types::transaction::GasLimit; - -#[async_trait::async_trait(?Send)] -/// An interface for high-level interaction with the Namada SDK -pub trait Namada<'a>: Sized { - /// A client with async request dispatcher method - type Client: 'a + crate::ledger::queries::Client + Sync; - /// Captures the interactive parts of the wallet's functioning - type WalletUtils: 'a + WalletIo + WalletStorage; - /// Abstracts platform specific details away from the logic of shielded pool - /// operations. - type ShieldedUtils: 'a + ShieldedUtils; - /// Captures the input/output streams used by this object - type Io: 'a + Io; - - /// Obtain the client for communicating with the ledger - fn client(&self) -> &'a Self::Client; - - /// Obtain the input/output handle for this context - fn io(&self) -> &'a Self::Io; - - /// Obtain read guard on the wallet - async fn wallet( - &self, - ) -> RwLockReadGuard<&'a mut Wallet>; - - /// Obtain write guard on the wallet - async fn wallet_mut( - &self, - ) -> RwLockWriteGuard<&'a mut Wallet>; - - /// Obtain read guard on the shielded context - async fn shielded( - &self, - ) -> RwLockReadGuard<&'a mut ShieldedContext>; - - /// Obtain write guard on the shielded context - async fn shielded_mut( - &self, - ) -> RwLockWriteGuard<&'a mut ShieldedContext>; - - /// Return the native token - fn native_token(&self) -> Address; - - /// Make a tx builder using no arguments - fn tx_builder(&self) -> args::Tx { - args::Tx { - dry_run: false, - dry_run_wrapper: false, - dump_tx: false, - output_folder: None, - force: false, - broadcast_only: false, - ledger_address: (), - initialized_account_alias: None, - wallet_alias_force: false, 
- fee_amount: None, - wrapper_fee_payer: None, - fee_token: self.native_token(), - fee_unshield: None, - gas_limit: GasLimit::from(20_000), - expiration: None, - disposable_signing_key: false, - chain_id: None, - signing_keys: vec![], - signatures: vec![], - tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), - verification_key: None, - password: None, - } - } - - /// Make a TxTransfer builder from the given minimum set of arguments - fn new_transfer( - &self, - source: TransferSource, - target: TransferTarget, - token: Address, - amount: InputAmount, - ) -> args::TxTransfer { - args::TxTransfer { - source, - target, - token, - amount, - tx_code_path: PathBuf::from(TX_TRANSFER_WASM), - tx: self.tx_builder(), - native_token: self.native_token(), - } - } - - /// Make a RevealPK builder from the given minimum set of arguments - fn new_reveal_pk(&self, public_key: common::PublicKey) -> args::RevealPk { - args::RevealPk { - public_key, - tx: self.tx_builder(), - } - } - - /// Make a Bond builder from the given minimum set of arguments - fn new_bond( - &self, - validator: Address, - amount: token::Amount, - ) -> args::Bond { - args::Bond { - validator, - amount, - source: None, - tx: self.tx_builder(), - native_token: self.native_token(), - tx_code_path: PathBuf::from(TX_BOND_WASM), - } - } - - /// Make a Unbond builder from the given minimum set of arguments - fn new_unbond( - &self, - validator: Address, - amount: token::Amount, - ) -> args::Unbond { - args::Unbond { - validator, - amount, - source: None, - tx: self.tx_builder(), - tx_code_path: PathBuf::from(TX_UNBOND_WASM), - } - } - - /// Make a TxIbcTransfer builder from the given minimum set of arguments - fn new_ibc_transfer( - &self, - source: Address, - receiver: String, - token: Address, - amount: InputAmount, - channel_id: ChannelId, - ) -> args::TxIbcTransfer { - args::TxIbcTransfer { - source, - receiver, - token, - amount, - channel_id, - port_id: PortId::from_str("transfer").unwrap(), - timeout_height: 
None, - timeout_sec_offset: None, - memo: None, - tx: self.tx_builder(), - tx_code_path: PathBuf::from(TX_IBC_WASM), - } - } - - /// Make a InitProposal builder from the given minimum set of arguments - fn new_init_proposal(&self, proposal_data: Vec) -> args::InitProposal { - args::InitProposal { - proposal_data, - native_token: self.native_token(), - is_offline: false, - is_pgf_stewards: false, - is_pgf_funding: false, - tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), - tx: self.tx_builder(), - } - } - - /// Make a TxUpdateAccount builder from the given minimum set of arguments - fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { - args::TxUpdateAccount { - addr, - vp_code_path: None, - public_keys: vec![], - threshold: None, - tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), - tx: self.tx_builder(), - } - } - - /// Make a VoteProposal builder from the given minimum set of arguments - fn new_vote_prposal( - &self, - vote: String, - voter: Address, - ) -> args::VoteProposal { - args::VoteProposal { - vote, - voter, - proposal_id: None, - is_offline: false, - proposal_data: None, - tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), - tx: self.tx_builder(), - } - } +use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; - /// Make a CommissionRateChange builder from the given minimum set of - /// arguments - fn new_change_commission_rate( - &self, - rate: Dec, - validator: Address, - ) -> args::CommissionRateChange { - args::CommissionRateChange { - rate, - validator, - tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), - tx: self.tx_builder(), - } - } +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::vm::wasm::{TxCache, VpCache}; +use crate::vm::WasmCacheAccess; - /// Make a TxInitValidator builder from the given minimum set of arguments - fn new_init_validator( - &self, - commission_rate: Dec, - max_commission_rate_change: Dec, - ) -> args::TxInitValidator { - args::TxInitValidator { - 
commission_rate, - max_commission_rate_change, - scheme: SchemeType::Ed25519, - account_keys: vec![], - threshold: None, - consensus_key: None, - eth_cold_key: None, - eth_hot_key: None, - protocol_key: None, - validator_vp_code_path: PathBuf::from(VP_USER_WASM), - unsafe_dont_encrypt: false, - tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), - tx: self.tx_builder(), +/// Dry run a transaction +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +pub fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H, VpCache, TxCache>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + use borsh::BorshSerialize; + use namada_core::ledger::gas::{Gas, GasMetering, TxGasMeter}; + use namada_core::ledger::storage::TempWlStorage; + use namada_core::proto::Tx; + use namada_core::types::transaction::wrapper::wrapper_tx::PairingEngine; + use namada_core::types::transaction::{ + AffineCurve, DecryptedTx, EllipticCurve, + }; + + use crate::ledger::protocol::ShellParams; + use crate::types::storage::TxIndex; + use crate::types::transaction::TxType; + + let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; + tx.validate_tx().into_storage_result()?; + + let mut temp_wl_storage = TempWlStorage::new(&ctx.wl_storage.storage); + let mut cumulated_gas = Gas::default(); + + // Wrapper dry run to allow estimating the gas cost of a transaction + let mut tx_gas_meter = match tx.header().tx_type { + TxType::Wrapper(wrapper) => { + let mut tx_gas_meter = + TxGasMeter::new(wrapper.gas_limit.to_owned()); + protocol::apply_wrapper_tx( + &wrapper, + None, + &request.data, + ShellParams::new( + &mut tx_gas_meter, + &mut temp_wl_storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ), + None, + ) + .into_storage_result()?; + + temp_wl_storage.write_log.commit_tx(); + cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); + + // NOTE: the 
encryption key for a dry-run should always be an + // hardcoded, dummy one + let _privkey = + ::G2Affine::prime_subgroup_generator(); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) } - } - - /// Make a TxUnjailValidator builder from the given minimum set of arguments - fn new_unjail_validator( - &self, - validator: Address, - ) -> args::TxUnjailValidator { - args::TxUnjailValidator { - validator, - tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), - tx: self.tx_builder(), + TxType::Protocol(_) | TxType::Decrypted(_) => { + // If dry run only the inner tx, use the max block gas as the gas + // limit + TxGasMeter::new( + namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) + .unwrap() + .into(), + ) } - } - - /// Make a Withdraw builder from the given minimum set of arguments - fn new_withdraw(&self, validator: Address) -> args::Withdraw { - args::Withdraw { - validator, - source: None, - tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), - tx: self.tx_builder(), + TxType::Raw => { + // Cast tx to a decrypted for execution + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + + // If dry run only the inner tx, use the max block gas as the gas + // limit + TxGasMeter::new( + namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) + .unwrap() + .into(), + ) } - } - - /// Make a Withdraw builder from the given minimum set of arguments - fn new_add_erc20_transfer( - &self, - sender: Address, - recipient: EthAddress, - asset: EthAddress, - amount: InputAmount, - ) -> args::EthereumBridgePool { - args::EthereumBridgePool { - sender, - recipient, - asset, - amount, - fee_amount: InputAmount::Unvalidated(token::DenominatedAmount { - amount: token::Amount::default(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }), - fee_payer: None, - fee_token: self.native_token(), - nut: false, - code_path: PathBuf::from(TX_BRIDGE_POOL_WASM), - tx: self.tx_builder(), - } - } + }; + + 
let mut data = protocol::apply_wasm_tx( + tx, + &TxIndex(0), + ShellParams::new( + &mut tx_gas_meter, + &mut temp_wl_storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ), + ) + .into_storage_result()?; + cumulated_gas = cumulated_gas + .checked_add(tx_gas_meter.get_tx_consumed_gas()) + .ok_or(namada_core::ledger::storage_api::Error::SimpleMessage( + "Overflow in gas", + ))?; + // Account gas for both inner and wrapper (if available) + data.gas_used = cumulated_gas; + // NOTE: the keys changed by the wrapper transaction (if any) are not + // returned from this function + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { + data, + proof: None, + info: Default::default(), + }) +} - /// Make a ResignSteward builder from the given minimum set of arguments - fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { - args::ResignSteward { - steward, - tx: self.tx_builder(), - tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), - } +#[cfg(test)] +mod test { + use borsh::{BorshDeserialize, BorshSerialize}; + use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::ledger::storage_api::{self, StorageWrite}; + use namada_core::types::hash::Hash; + use namada_core::types::storage::{BlockHeight, Key}; + use namada_core::types::transaction::decrypted::DecryptedTx; + use namada_core::types::transaction::TxType; + use namada_core::types::{address, token}; + use namada_sdk::queries::{Router, RPC}; + use namada_test_utils::TestWasms; + use tempfile::TempDir; + use tendermint_rpc::{Error as RpcError, Response}; + + use crate::ledger::events::log::EventLog; + use crate::ledger::queries::Client; + use crate::ledger::{EncodedResponseQuery, RequestCtx, RequestQuery}; + use crate::proto::{Code, Data, Tx}; + use crate::vm::wasm::{TxCache, VpCache}; + use crate::vm::{wasm, WasmCacheRoAccess}; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router 
+ pub rpc: RPC, + /// storage + pub wl_storage: TestWlStorage, + /// event log + pub event_log: EventLog, + /// VP wasm compilation cache + pub vp_wasm_cache: VpCache, + /// tx wasm compilation cache + pub tx_wasm_cache: TxCache, + /// VP wasm compilation cache directory + pub vp_cache_dir: TempDir, + /// tx wasm compilation cache directory + pub tx_cache_dir: TempDir, } - /// Make a UpdateStewardCommission builder from the given minimum set of - /// arguments - fn new_update_steward_rewards( - &self, - steward: Address, - commission: Vec, - ) -> args::UpdateStewardCommission { - args::UpdateStewardCommission { - steward, - commission, - tx: self.tx_builder(), - tx_code_path: PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let mut wl_storage = TestWlStorage::default(); + + // Initialize mock gas limit + let max_block_gas_key = + namada_core::ledger::parameters::storage::get_max_block_gas_key( + ); + wl_storage + .storage + .write( + &max_block_gas_key, + namada_core::ledger::storage::types::encode( + &20_000_000_u64, + ), + ) + .expect( + "Max block gas parameter must be initialized in storage", + ); + let event_log = EventLog::default(); + let (vp_wasm_cache, vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + let (tx_wasm_cache, tx_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + Self { + rpc, + wl_storage, + event_log, + vp_wasm_cache: vp_wasm_cache.read_only(), + tx_wasm_cache: tx_wasm_cache.read_only(), + vp_cache_dir, + tx_cache_dir, + } } } - /// Make a TxCustom builder from the given minimum set of arguments - fn new_custom(&self, owner: Address) -> args::TxCustom { - args::TxCustom { - owner, - tx: self.tx_builder(), - code_path: None, - data_path: None, - serialized_tx: None, + #[cfg_attr(feature = "async-send", 
async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + wl_storage: &self.wl_storage, + event_log: &self.event_log, + vp_wasm_cache: self.vp_wasm_cache.clone(), + tx_wasm_cache: self.tx_wasm_cache.clone(), + storage_read_past_height_limit: None, + }; + // TODO: this is a hack to propagate errors to the caller, we should + // really permit error types other than [`std::io::Error`] + if request.path == "/shell/dry_run_tx" { + super::dry_run_tx(ctx, &request) + } else { + self.rpc.handle(ctx, &request) + } + .map_err(|err| { + std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) + }) } - } - - /// Sign the given transaction using the given signing data - async fn sign( - &self, - tx: &mut Tx, - args: &args::Tx, - signing_data: SigningTxData, - ) -> crate::sdk::error::Result<()> { - signing::sign_tx(*self.wallet_mut().await, args, tx, signing_data) - } - /// Process the given transaction using the given flags - async fn submit( - &self, - tx: Tx, - args: &args::Tx, - ) -> crate::sdk::error::Result { - tx::process_tx(self, args, tx).await - } -} - -/// Provides convenience methods for common Namada interactions -pub struct NamadaImpl<'a, C, U, V, I> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, - I: Io, -{ - /// Used to send and receive messages from the ledger - pub client: &'a C, - /// Stores the addresses and keys required for ledger interactions - pub wallet: RwLock<&'a mut Wallet>, - /// Stores the 
current state of the shielded pool - pub shielded: RwLock<&'a mut ShieldedContext>, - /// Captures the input/output streams used by this object - pub io: &'a I, - /// The address of the native token - native_token: Address, - /// The default builder for a Tx - prototype: args::Tx, -} - -impl<'a, C, U, V, I> NamadaImpl<'a, C, U, V, I> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, - I: Io, -{ - /// Construct a new Namada context with the given native token address - pub fn native_new( - client: &'a C, - wallet: &'a mut Wallet, - shielded: &'a mut ShieldedContext, - io: &'a I, - native_token: Address, - ) -> Self { - NamadaImpl { - client, - wallet: RwLock::new(wallet), - shielded: RwLock::new(shielded), - io, - native_token: native_token.clone(), - prototype: args::Tx { - dry_run: false, - dry_run_wrapper: false, - dump_tx: false, - output_folder: None, - force: false, - broadcast_only: false, - ledger_address: (), - initialized_account_alias: None, - wallet_alias_force: false, - fee_amount: None, - wrapper_fee_payer: None, - fee_token: native_token, - fee_unshield: None, - gas_limit: GasLimit::from(20_000), - expiration: None, - disposable_signing_key: false, - chain_id: None, - signing_keys: vec![], - signatures: vec![], - tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), - verification_key: None, - password: None, - }, + async fn perform(&self, _request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + Response::from_string("TODO") } } - /// Construct a new Namada context looking up the native token address - pub async fn new( - client: &'a C, - wallet: &'a mut Wallet, - shielded: &'a mut ShieldedContext, - io: &'a I, - ) -> crate::sdk::error::Result> { - let native_token = query_native_token(client).await?; - Ok(NamadaImpl::native_new( - client, - wallet, - shielded, - io, - native_token, - )) - } -} - -#[async_trait::async_trait(?Send)] -impl<'a, C, U, V, I> Namada<'a> for NamadaImpl<'a, C, U, V, I> -where - 
C: crate::ledger::queries::Client + Sync, - U: WalletIo + WalletStorage, - V: ShieldedUtils, - I: Io, -{ - type Client = C; - type Io = I; - type ShieldedUtils = V; - type WalletUtils = U; - - /// Obtain the prototypical Tx builder - fn tx_builder(&self) -> args::Tx { - self.prototype.clone() - } - - fn native_token(&self) -> Address { - self.native_token.clone() - } - - fn io(&self) -> &'a Self::Io { - self.io - } - - fn client(&self) -> &'a Self::Client { - self.client - } - - async fn wallet( - &self, - ) -> RwLockReadGuard<&'a mut Wallet> { - self.wallet.read().await - } - - async fn wallet_mut( - &self, - ) -> RwLockWriteGuard<&'a mut Wallet> { - self.wallet.write().await - } - - async fn shielded( - &self, - ) -> RwLockReadGuard<&'a mut ShieldedContext> { - self.shielded.read().await - } - - async fn shielded_mut( - &self, - ) -> RwLockWriteGuard<&'a mut ShieldedContext> { - self.shielded.write().await - } -} - -/// Allow the prototypical Tx builder to be modified -impl<'a, C, U, V, I> args::TxBuilder for NamadaImpl<'a, C, U, V, I> -where - C: crate::ledger::queries::Client + Sync, - U: WalletIo, - V: ShieldedUtils, - I: Io, -{ - fn tx(self, func: F) -> Self - where - F: FnOnce(args::Tx) -> args::Tx, + #[tokio::test] + async fn test_shell_queries_router_with_client() -> storage_api::Result<()> { - Self { - prototype: func(self.prototype), - ..self - } + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + // store the wasm code + let tx_no_op = TestWasms::TxNoOp.read_bytes(); + let tx_hash = Hash::sha256(&tx_no_op); + let key = Key::wasm_code(&tx_hash); + let len_key = Key::wasm_code_len(&tx_hash); + client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); + client + .wl_storage + .storage + .write(&len_key, (tx_no_op.len() as u64).try_to_vec().unwrap()) + .unwrap(); + + // Request last committed epoch + let read_epoch = RPC.shell().epoch(&client).await.unwrap(); + let current_epoch = client.wl_storage.storage.last_epoch; + 
assert_eq!(current_epoch, read_epoch); + + // Request dry run tx + let mut outer_tx = + Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); + outer_tx.set_code(Code::from_hash(tx_hash)); + outer_tx.set_data(Data::new(vec![])); + let tx_bytes = outer_tx.to_bytes(); + let result = RPC + .shell() + .dry_run_tx(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert!(read_balance.data.is_empty()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert!(read_balances.data.is_empty()); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::native_whole(1000); + StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; + // It has to be committed to be visible in a query + client.wl_storage.commit_tx(); + client.wl_storage.commit_block().unwrap(); + // ... 
there should be the same value now + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.data).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert_eq!(read_balances.data.len(), 1); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(has_balance_key); + + Ok(()) } } diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs deleted file mode 100644 index e78313d804..0000000000 --- a/shared/src/ledger/queries/mod.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! Ledger read-only queries can be handled and dispatched via the [`RPC`] -//! defined via `router!` macro. - -// Re-export to show in rustdoc! -pub use shell::Shell; -use shell::SHELL; -pub use types::{ - EncodedResponseQuery, Error, RequestCtx, RequestQuery, ResponseQuery, - Router, -}; -use vp::{Vp, VP}; - -pub use self::shell::eth_bridge::{ - Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, - TransferToErcArgs, -}; -use super::storage::traits::StorageHasher; -use super::storage::{DBIter, DB}; -use super::storage_api; -#[cfg(any(test, feature = "async-client"))] -pub use crate::sdk::queries::Client; -use crate::types::storage::BlockHeight; - -#[macro_use] -mod router; -mod shell; -mod types; -pub mod vp; - -// Most commonly expected patterns should be declared first -router! {RPC, - // Shell provides storage read access, block metadata and can dry-run a tx - ( "shell" ) = (sub SHELL), - - // Validity-predicate's specific storage queries - ( "vp" ) = (sub VP), -} - -/// Handle RPC query request in the ledger. On success, returns response with -/// borsh-encoded data. 
-pub fn handle_path( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - RPC.handle(ctx, request) -} - -// Handler helpers: - -/// For queries that only support latest height, check that the given height is -/// not different from latest height, otherwise return an error. -pub fn require_latest_height( - ctx: &RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result<()> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - if request.height != BlockHeight(0) - && request.height != ctx.wl_storage.storage.get_last_block_height() - { - return Err(storage_api::Error::new_const( - "This query doesn't support arbitrary block heights, only the \ - latest committed block height ('0' can be used as a special \ - value that means the latest block height)", - )); - } - Ok(()) -} - -/// For queries that do not support proofs, check that proof is not requested, -/// otherwise return an error. -pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { - if request.prove { - return Err(storage_api::Error::new_const( - "This query doesn't support proofs", - )); - } - Ok(()) -} - -/// For queries that don't use request data, require that there are no data -/// attached. 
-pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { - if !request.data.is_empty() { - return Err(storage_api::Error::new_const( - "This query doesn't accept request data", - )); - } - Ok(()) -} - -/// Queries testing helpers -#[cfg(any(test, feature = "testing"))] -mod testing { - - use tempfile::TempDir; - use tendermint_rpc::Response; - - use super::*; - use crate::ledger::events::log::EventLog; - use crate::ledger::storage::testing::TestWlStorage; - use crate::tendermint_rpc::error::Error as RpcError; - use crate::types::storage::BlockHeight; - use crate::vm::wasm::{self, TxCache, VpCache}; - use crate::vm::WasmCacheRoAccess; - - /// A test client that has direct access to the storage - pub struct TestClient - where - RPC: Router, - { - /// RPC router - pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, - /// event log - pub event_log: EventLog, - /// VP wasm compilation cache - pub vp_wasm_cache: VpCache, - /// tx wasm compilation cache - pub tx_wasm_cache: TxCache, - /// VP wasm compilation cache directory - pub vp_cache_dir: TempDir, - /// tx wasm compilation cache directory - pub tx_cache_dir: TempDir, - } - - impl TestClient - where - RPC: Router, - { - #[allow(dead_code)] - /// Initialize a test client for the given root RPC router - pub fn new(rpc: RPC) -> Self { - // Initialize the `TestClient` - let mut wl_storage = TestWlStorage::default(); - - // Initialize mock gas limit - let max_block_gas_key = - namada_core::ledger::parameters::storage::get_max_block_gas_key( - ); - wl_storage - .storage - .write( - &max_block_gas_key, - namada_core::ledger::storage::types::encode( - &20_000_000_u64, - ), - ) - .expect( - "Max block gas parameter must be initialized in storage", - ); - let event_log = EventLog::default(); - let (vp_wasm_cache, vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - let (tx_wasm_cache, tx_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - Self { - rpc, - wl_storage, 
- event_log, - vp_wasm_cache: vp_wasm_cache.read_only(), - tx_wasm_cache: tx_wasm_cache.read_only(), - vp_cache_dir, - tx_cache_dir, - } - } - } - - #[cfg_attr(feature = "async-send", async_trait::async_trait)] - #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] - impl Client for TestClient - where - RPC: Router + Sync, - { - type Error = std::io::Error; - - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result { - let data = data.unwrap_or_default(); - let height = height.unwrap_or_default(); - // Handle a path by invoking the `RPC.handle` directly with the - // borrowed storage - let request = RequestQuery { - data, - path, - height, - prove, - }; - let ctx = RequestCtx { - wl_storage: &self.wl_storage, - event_log: &self.event_log, - vp_wasm_cache: self.vp_wasm_cache.clone(), - tx_wasm_cache: self.tx_wasm_cache.clone(), - storage_read_past_height_limit: None, - }; - // TODO: this is a hack to propagate errors to the caller, we should - // really permit error types other than [`std::io::Error`] - self.rpc.handle(ctx, &request).map_err(|err| { - std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) - }) - } - - async fn perform(&self, _request: R) -> Result - where - R: tendermint_rpc::SimpleRequest, - { - Response::from_string("TODO") - } - } -} diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 3036c4cb47..d0d1ea8b2b 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -24,7 +24,7 @@ pub use { }; pub mod ledger; pub use namada_core::proto; -pub mod sdk; +pub use namada_sdk; pub mod types; pub mod vm; diff --git a/shared/src/sdk/mod.rs b/shared/src/sdk/mod.rs deleted file mode 100644 index 381bac03d1..0000000000 --- a/shared/src/sdk/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! 
Namada's SDK API -pub mod rpc; - -pub mod args; -pub mod masp; -pub mod signing; -#[allow(clippy::result_large_err)] -pub mod tx; - -pub mod error; -pub mod queries; -pub mod wallet; diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 83e58f0fa8..04801cf6c5 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,8 +1,8 @@ //! Types definitions. -pub mod control_flow; +pub use namada_sdk::control_flow; pub mod ibc; -pub mod io; +pub use namada_sdk::io; pub mod key; pub use namada_core::types::{ diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..5229c65908 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1885,7 +1885,7 @@ where // TODO: once the runtime gas meter is implemented we need to benchmark // this funcion and charge the gas here. For the moment, the cost of // this is included in the benchmark of the masp vp - HostEnvResult::from(crate::sdk::masp::verify_shielded_tx(&shielded)) + HostEnvResult::from(namada_sdk::masp::verify_shielded_tx(&shielded)) .to_i64(), ) } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 674bce9538..5ab740b9ca 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -31,6 +31,7 @@ wasm-runtime = ["namada/wasm-runtime"] [dependencies] namada = {path = "../shared", features = ["testing"]} namada_core = {path = "../core", features = ["testing"]} +namada_sdk = {path = "../sdk"} namada_test_utils = {path = "../test_utils"} namada_vp_prelude = {path = "../vp_prelude"} namada_tx_prelude = {path = "../tx_prelude"} diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 025f360c42..7bfbf12e2b 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -19,7 +19,6 @@ use std::time::{Duration, Instant}; use borsh::BorshSerialize; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; -use namada::sdk::masp::fs::FsShieldedUtils; use namada::types::address::Address; use namada::types::storage::Epoch; 
use namada::types::token; @@ -32,6 +31,7 @@ use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; use namada_core::ledger::governance::cli::onchain::{ PgfFunding, PgfFundingTarget, StewardsUpdate, }; +use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; use namada_vp_prelude::BTreeSet; use serde_json::json; diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index 261d3acd08..28d5375f79 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -2,12 +2,12 @@ use std::path::PathBuf; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; -use namada::sdk::masp::fs::FsShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; use namada_core::types::address::{btc, eth, masp_rewards}; use namada_core::types::token; use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; +use namada_sdk::masp::fs::FsShieldedUtils; use test_log::test; use super::setup; diff --git a/tests/src/native_vp/eth_bridge_pool.rs b/tests/src/native_vp/eth_bridge_pool.rs index 364dcd074c..6fc304f909 100644 --- a/tests/src/native_vp/eth_bridge_pool.rs +++ b/tests/src/native_vp/eth_bridge_pool.rs @@ -4,10 +4,6 @@ mod test_bridge_pool_vp { use borsh::{BorshDeserialize, BorshSerialize}; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; - use namada::ledger::eth_bridge::{ - wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeConfig, - UpgradeableContract, - }; use namada::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; use namada::proto::Tx; use namada::types::address::{nam, wnam}; @@ -20,6 +16,10 @@ mod test_bridge_pool_vp { use namada::types::token::Amount; use namada_apps::wallet::defaults::{albert_address, bertha_address}; use namada_apps::wasm_loader; + use namada_sdk::eth_bridge::{ + wrapped_erc20s, Contracts, 
Erc20WhitelistEntry, EthereumBridgeConfig, + UpgradeableContract, + }; use crate::native_vp::TestNativeVpEnv; use crate::tx::{tx_host_env, TestTxEnv}; diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 5dbe91c1e4..8984c1708f 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3332,6 +3332,7 @@ dependencies = [ "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "num256", "orion", "owo-colors", @@ -3464,6 +3465,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_sdk" +version = "0.23.0" +dependencies = [ + "async-trait", + "bimap", + "borsh 0.9.4", + "circular-queue", + "data-encoding", + "derivation-path", + "ethbridge-bridge-contract", + "ethers", + "futures", + "itertools", + "masp_primitives", + "masp_proofs", + "namada_core", + "namada_ethereum_bridge", + "namada_proof_of_stake", + "num256", + "orion", + "owo-colors", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde", + "serde_json", + "sha2 0.9.9", + "slip10_ed25519", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.11", + "tracing", + "wasmtimer", + "zeroize", +] + [[package]] name = "namada_test_utils" version = "0.23.0" @@ -3486,6 +3530,7 @@ dependencies = [ "lazy_static", "namada", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 8e3bc2bb20..43c75ed658 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -3332,6 +3332,7 @@ dependencies = [ "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "num256", "orion", "owo-colors", @@ -3464,6 +3465,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_sdk" +version = "0.23.0" +dependencies = [ + "async-trait", + "bimap", + "borsh 0.9.4", + "circular-queue", + "data-encoding", + "derivation-path", + 
"ethbridge-bridge-contract", + "ethers", + "futures", + "itertools", + "masp_primitives", + "masp_proofs", + "namada_core", + "namada_ethereum_bridge", + "namada_proof_of_stake", + "num256", + "orion", + "owo-colors", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde", + "serde_json", + "sha2 0.9.9", + "slip10_ed25519", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.11", + "tracing", + "wasmtimer", + "zeroize", +] + [[package]] name = "namada_test_utils" version = "0.23.0" @@ -3486,6 +3530,7 @@ dependencies = [ "lazy_static", "namada", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", From f892ab4040465d2d6388da6e5eefdc4a29f69513 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Thu, 12 Oct 2023 09:53:16 +0200 Subject: [PATCH 085/161] Changes to enable better usage of the SDK in the absence of wallet or shielded context. --- apps/src/lib/client/rpc.rs | 109 +++++++++++++++++-------------------- sdk/src/lib.rs | 25 ++++++++- sdk/src/masp.rs | 53 +++++++++++------- sdk/src/rpc.rs | 22 +++++--- sdk/src/signing.rs | 44 +++++---------- sdk/src/tx.rs | 8 +-- 6 files changed, 136 insertions(+), 125 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d5dc2f23a3..b3902e0900 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -46,8 +46,7 @@ use namada::types::{storage, token}; use namada_sdk::error::{is_pinned_error, Error, PinnedBalanceError}; use namada_sdk::masp::{Conversions, MaspAmount, MaspChange}; use namada_sdk::rpc::{ - self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, - TxResponse, + self, enriched_bonds_and_unbonds, query_epoch, TxResponse, }; use namada_sdk::wallet::AddressVpType; use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; @@ -158,7 +157,8 @@ pub async fn query_transfers<'a>( // transaction's reception 
let amt = shielded .compute_exchanged_amount( - context, + context.client(), + context.io(), amt, epoch, Conversions::new(), @@ -209,8 +209,7 @@ pub async fn query_transfers<'a>( context.io(), " {}{} {}", sign, - format_denominated_amount(context, asset, change.into(),) - .await, + context.format_amount(asset, change.into()).await, token_alias ); } @@ -233,12 +232,7 @@ pub async fn query_transfers<'a>( context.io(), " {}{} {}", sign, - format_denominated_amount( - context, - &token_addr, - val.into(), - ) - .await, + context.format_amount(&token_addr, val.into()).await, token_alias, ); } @@ -329,9 +323,7 @@ pub async fn query_transparent_balance<'a>( .await { Ok(balance) => { - let balance = - format_denominated_amount(context, &token, balance) - .await; + let balance = context.format_amount(&token, balance).await; display_line!(context.io(), "{}: {}", token_alias, balance); } Err(e) => { @@ -351,9 +343,7 @@ pub async fn query_transparent_balance<'a>( let balance = get_token_balance(context.client(), &token, &owner).await; if !balance.is_zero() { - let balance = - format_denominated_amount(context, &token, balance) - .await; + let balance = context.format_amount(&token, balance).await; display_line!(context.io(), "{}: {}", token_alias, balance); } } @@ -481,12 +471,9 @@ pub async fn query_pinned_balance<'a>( token_alias ); } else { - let formatted = format_denominated_amount( - context, - token, - total_balance.into(), - ) - .await; + let formatted = context + .format_amount(token, total_balance.into()) + .await; display_line!( context.io(), "Payment address {} was consumed during epoch {}. 
\ @@ -515,12 +502,9 @@ pub async fn query_pinned_balance<'a>( ); found_any = true; } - let formatted = format_denominated_amount( - context, - token_addr, - (*value).into(), - ) - .await; + let formatted = context + .format_amount(token_addr, (*value).into()) + .await; let token_alias = tokens .get(token_addr) .map(|a| a.to_string()) @@ -567,7 +551,7 @@ async fn print_balances<'a>( owner.clone(), format!( ": {}, owned by {}", - format_denominated_amount(context, tok, balance).await, + context.format_amount(tok, balance).await, wallet.lookup_alias(owner) ), ), @@ -736,7 +720,12 @@ pub async fn query_shielded_balance<'a>( context .shielded_mut() .await - .compute_exchanged_balance(context, &viewing_key, epoch) + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -759,12 +748,12 @@ pub async fn query_shielded_balance<'a>( context.io(), "{}: {}", token_alias, - format_denominated_amount( - context, - &token, - token::Amount::from(total_balance) - ) - .await + context + .format_amount( + &token, + token::Amount::from(total_balance), + ) + .await ); } } @@ -790,7 +779,12 @@ pub async fn query_shielded_balance<'a>( context .shielded_mut() .await - .compute_exchanged_balance(context, &viewing_key, epoch) + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -830,12 +824,8 @@ pub async fn query_shielded_balance<'a>( .map(|a| a.to_string()) .unwrap_or_else(|| token.to_string()); display_line!(context.io(), "Shielded Token {}:", alias); - let formatted = format_denominated_amount( - context, - &token, - token_balance.into(), - ) - .await; + let formatted = + context.format_amount(&token, token_balance.into()).await; display_line!( context.io(), " {}, owned by {}", @@ -879,7 +869,12 @@ pub async fn query_shielded_balance<'a>( context .shielded_mut() .await - 
.compute_exchanged_balance(context, &viewing_key, epoch) + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -889,12 +884,8 @@ pub async fn query_shielded_balance<'a>( if !val.is_zero() { found_any = true; } - let formatted = format_denominated_amount( - context, - address, - (*val).into(), - ) - .await; + let formatted = + context.format_amount(address, (*val).into()).await; display_line!( context.io(), " {}, owned by {}", @@ -930,7 +921,12 @@ pub async fn query_shielded_balance<'a>( let balance = context .shielded_mut() .await - .compute_exchanged_balance(context, &viewing_key, epoch) + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key"); @@ -957,12 +953,7 @@ pub async fn print_decoded_balance<'a>( context.io(), "{} : {}", context.wallet().await.lookup_alias(token_addr), - format_denominated_amount( - context, - token_addr, - (*amount).into() - ) - .await, + context.format_amount(token_addr, (*amount).into()).await, ); } } @@ -990,7 +981,7 @@ pub async fn print_decoded_balance_with_epoch<'a>( "{} | {} : {}", alias, epoch, - format_denominated_amount(context, token_addr, asset_value).await, + context.format_amount(token_addr, asset_value).await, ); } } diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 78a5caf812..622a63a1d1 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -51,8 +51,11 @@ use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::io::Io; use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::proto::Tx; -use crate::rpc::query_native_token; +use crate::rpc::{ + denominate_amount, format_denominated_amount, query_native_token, +}; use crate::signing::SigningTxData; +use crate::token::DenominatedAmount; use crate::tx::{ ProcessTxResponse, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, 
TX_INIT_PROPOSAL, @@ -395,6 +398,26 @@ pub trait Namada<'a>: Sized { ) -> crate::error::Result { tx::process_tx(self, args, tx).await } + + /// Look up the denomination of a token in order to make a correctly + /// denominated amount. + async fn denominate_amount( + &self, + token: &Address, + amount: token::Amount, + ) -> DenominatedAmount { + denominate_amount(self.client(), self.io(), token, amount).await + } + + /// Look up the denomination of a token in order to format it correctly as a + /// string. + async fn format_amount( + &self, + token: &Address, + amount: token::Amount, + ) -> String { + format_denominated_amount(self.client(), self.io(), token, amount).await + } } /// Provides convenience methods for common Namada interactions diff --git a/sdk/src/masp.rs b/sdk/src/masp.rs index b010bde7d7..a860839e3a 100644 --- a/sdk/src/masp.rs +++ b/sdk/src/masp.rs @@ -1030,19 +1030,20 @@ impl ShieldedContext { /// context and express that value in terms of the currently timestamped /// asset types. If the key is not in the context, then we do not know the /// balance and hence we return None. - pub async fn compute_exchanged_balance<'a>( + pub async fn compute_exchanged_balance( &mut self, - context: &impl Namada<'a>, + client: &(impl Client + Sync), + io: &impl Io, vk: &ViewingKey, target_epoch: Epoch, ) -> Result, Error> { // First get the unexchanged balance - if let Some(balance) = - self.compute_shielded_balance(context.client(), vk).await? + if let Some(balance) = self.compute_shielded_balance(client, vk).await? 
{ let exchanged_amount = self .compute_exchanged_amount( - context, + client, + io, balance, target_epoch, BTreeMap::new(), @@ -1051,8 +1052,7 @@ impl ShieldedContext { .0; // And then exchange balance into current asset types Ok(Some( - self.decode_all_amounts(context.client(), exchanged_amount) - .await, + self.decode_all_amounts(client, exchanged_amount).await, )) } else { Ok(None) @@ -1065,9 +1065,10 @@ impl ShieldedContext { /// the trace amount that could not be converted is moved from input to /// output. #[allow(clippy::too_many_arguments)] - async fn apply_conversion<'a>( + async fn apply_conversion( &mut self, - context: &impl Namada<'a>, + client: &(impl Client + Sync), + io: &impl Io, conv: AllowedConversion, asset_type: (Epoch, Address, MaspDenom), value: i128, @@ -1087,7 +1088,7 @@ impl ShieldedContext { let threshold = -conv[&masp_asset]; if threshold == 0 { edisplay_line!( - context.io(), + io, "Asset threshold of selected conversion for asset type {} is \ 0, this is a bug, please report it.", masp_asset @@ -1106,7 +1107,7 @@ impl ShieldedContext { *usage += required; // Apply the conversions to input and move the trace amount to output *input += self - .decode_all_amounts(context.client(), conv.clone() * required) + .decode_all_amounts(client, conv.clone() * required) .await - trace.clone(); *output += trace; @@ -1117,9 +1118,10 @@ impl ShieldedContext { /// note of the conversions that were used. Note that this function does /// not assume that allowed conversions from the ledger are expressed in /// terms of the latest asset types. 
- pub async fn compute_exchanged_amount<'a>( + pub async fn compute_exchanged_amount( &mut self, - context: &impl Namada<'a>, + client: &(impl Client + Sync), + io: &impl Io, mut input: MaspAmount, target_epoch: Epoch, mut conversions: Conversions, @@ -1141,13 +1143,13 @@ impl ShieldedContext { let denom_value = denom.denominate_i128(&value); self.query_allowed_conversion( - context.client(), + client, target_asset_type, &mut conversions, ) .await; self.query_allowed_conversion( - context.client(), + client, asset_type, &mut conversions, ) @@ -1156,7 +1158,7 @@ impl ShieldedContext { (conversions.get_mut(&asset_type), at_target_asset_type) { display_line!( - context.io(), + io, "converting current asset type to latest asset type..." ); // Not at the target asset type, not at the latest asset @@ -1164,7 +1166,8 @@ impl ShieldedContext { // current asset type to the latest // asset type. self.apply_conversion( - context, + client, + io, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1178,7 +1181,7 @@ impl ShieldedContext { at_target_asset_type, ) { display_line!( - context.io(), + io, "converting latest asset type to target asset type..." ); // Not at the target asset type, yet at the latest asset @@ -1186,7 +1189,8 @@ impl ShieldedContext { // from latest asset type to the target // asset type. 
self.apply_conversion( - context, + client, + io, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1268,7 +1272,8 @@ impl ShieldedContext { self.decode_all_amounts(context.client(), pre_contr).await; let (contr, proposed_convs) = self .compute_exchanged_amount( - context, + context.client(), + context.io(), input, target_epoch, conversions.clone(), @@ -1422,7 +1427,13 @@ impl ShieldedContext { display_line!(context.io(), "Decoded pinned balance: {:?}", amount); // Finally, exchange the balance to the transaction's epoch let computed_amount = self - .compute_exchanged_amount(context, amount, ep, BTreeMap::new()) + .compute_exchanged_amount( + context.client(), + context.io(), + amount, + ep, + BTreeMap::new(), + ) .await? .0; display_line!(context.io(), "Exchanged amount: {:?}", computed_amount); diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index 9307374663..f1267fe8a8 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -1037,21 +1037,22 @@ pub async fn wait_until_node_is_synched<'a>( /// Look up the denomination of a token in order to make a correctly denominated /// amount. -pub async fn denominate_amount<'a, N: Namada<'a>>( - context: &N, +pub async fn denominate_amount( + client: &C, + io: &impl Io, token: &Address, amount: token::Amount, ) -> DenominatedAmount { - let denom = convert_response::>( - RPC.vp().token().denomination(context.client(), token).await, + let denom = convert_response::>( + RPC.vp().token().denomination(client, token).await, ) .unwrap_or_else(|t| { - display_line!(context.io(), "Error in querying for denomination: {t}"); + display_line!(io, "Error in querying for denomination: {t}"); None }) .unwrap_or_else(|| { display_line!( - context.io(), + io, "No denomination found for token: {token}, defaulting to zero \ decimal places" ); @@ -1062,10 +1063,13 @@ pub async fn denominate_amount<'a, N: Namada<'a>>( /// Look up the denomination of a token in order to format it /// correctly as a string. 
-pub async fn format_denominated_amount<'a>( - context: &impl Namada<'a>, +pub async fn format_denominated_amount( + client: &(impl Client + Sync), + io: &impl Io, token: &Address, amount: token::Amount, ) -> String { - denominate_amount(context, token, amount).await.to_string() + denominate_amount(client, io, token, amount) + .await + .to_string() } diff --git a/sdk/src/signing.rs b/sdk/src/signing.rs index 7680adfdd2..db2aab3482 100644 --- a/sdk/src/signing.rs +++ b/sdk/src/signing.rs @@ -41,9 +41,7 @@ use crate::ibc_proto::google::protobuf::Any; use crate::io::*; use crate::masp::make_asset_type; use crate::proto::{MaspBuilder, Section, Tx}; -use crate::rpc::{ - format_denominated_amount, query_wasm_code_hash, validate_amount, -}; +use crate::rpc::{query_wasm_code_hash, validate_amount}; use crate::tx::{ TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_INIT_VALIDATOR_WASM, TX_REVEAL_PK, TX_TRANSFER_WASM, @@ -515,19 +513,12 @@ pub async fn wrap_tx<'a, N: Namada<'a>>( } else { let token_addr = args.fee_token.clone(); if !args.force { - let fee_amount = format_denominated_amount( - context, - &token_addr, - total_fee, - ) - .await; - - let balance = format_denominated_amount( - context, - &token_addr, - updated_balance, - ) - .await; + let fee_amount = + context.format_amount(&token_addr, total_fee).await; + + let balance = context + .format_amount(&token_addr, updated_balance) + .await; return Err(Error::from(TxError::BalanceTooLowForFees( fee_payer_address, token_addr, @@ -621,8 +612,7 @@ async fn make_ledger_amount_asset<'a>( ) { if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees - let formatted_amt = - format_denominated_amount(context, token, amount.into()).await; + let formatted_amt = context.format_amount(token, amount.into()).await; if let Some(token) = tokens.get(token) { output .push( @@ -1332,18 +1322,12 @@ pub async fn 
to_ledger_vector<'a>( if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); - let gas_limit = format_denominated_amount( - context, - &gas_token, - Amount::from(wrapper.gas_limit), - ) - .await; - let fee_amount_per_gas_unit = format_denominated_amount( - context, - &gas_token, - wrapper.fee.amount_per_gas_unit, - ) - .await; + let gas_limit = context + .format_amount(&gas_token, Amount::from(wrapper.gas_limit)) + .await; + let fee_amount_per_gas_unit = context + .format_amount(&gas_token, wrapper.fee.amount_per_gas_unit) + .await; tv.output_expert.extend(vec![ format!("Timestamp : {}", tx.header.timestamp.0), format!("PK : {}", wrapper.pk), diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index b4b059e249..acfe12f6bd 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -58,8 +58,7 @@ use crate::masp::{ShieldedContext, ShieldedTransfer}; use crate::proto::{MaspBuilder, Tx}; use crate::queries::Client; use crate::rpc::{ - self, format_denominated_amount, query_wasm_code_hash, validate_amount, - TxBroadcastData, TxResponse, + self, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, }; use crate::signing::{self, SigningTxData, TxSourcePostBalance}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; @@ -2083,9 +2082,8 @@ async fn check_balance_too_low_err<'a, N: Namada<'a>>( transfer is {} and the balance is {}.", source, token, - format_denominated_amount(context, token, amount).await, - format_denominated_amount(context, token, balance) - .await, + context.format_amount(token, amount).await, + context.format_amount(token, balance).await, ); Ok(token::Amount::default()) } else { From 94b2d4bdd0b4d668db7cf4e10358ba1a5cb98db4 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Thu, 12 Oct 2023 10:02:54 +0200 Subject: [PATCH 086/161] Added a change log entry. 
--- .changelog/unreleased/SDK/1963-sdk-refactor-rebased.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/SDK/1963-sdk-refactor-rebased.md diff --git a/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md b/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md new file mode 100644 index 0000000000..6add26845e --- /dev/null +++ b/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md @@ -0,0 +1,2 @@ +- Improved the usability of the SDK and moved it to separate crate. + ([\#1963](https://github.com/anoma/namada/pull/1963)) \ No newline at end of file From 0c006ecce2b346e9c3e398dcae96f05f5e8d2c95 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 26 Sep 2023 09:43:55 +0100 Subject: [PATCH 087/161] Add new SDK errors --- sdk/src/error.rs | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/sdk/src/error.rs b/sdk/src/error.rs index a3091a3d7c..b34a7a5562 100644 --- a/sdk/src/error.rs +++ b/sdk/src/error.rs @@ -38,6 +38,9 @@ pub enum Error { /// Errors that handle querying from storage #[error("Querying error: {0}")] Query(#[from] QueryError), + /// Ethereum bridge related errors + #[error("{0}")] + EthereumBridge(#[from] EthereumBridgeError), /// Any Other errors that are uncategorized #[error("{0}")] Other(String), @@ -88,6 +91,10 @@ pub enum QueryError { /// Wasm querying failure #[error("Wasm code path {0} does not exist on chain")] Wasm(String), + /// The queried node is outdated, and is in the process of + /// synchronizing with the network. + #[error("Node is still catching up with the network")] + CatchingUp, } /// Errors that deal with Decoding, Encoding, or Conversions @@ -279,6 +286,46 @@ pub enum TxError { Other(String), } +/// Ethereum bridge related errors. +#[derive(Error, Debug, Clone)] +pub enum EthereumBridgeError { + /// Error invoking smart contract function. + #[error("Smart contract call failed: {0}")] + ContractCall(String), + /// Ethereum RPC error. 
+ #[error("RPC error: {0}")] + Rpc(String), + /// Error reading the signed Bridge pool. + #[error("Failed to read signed Bridge pool: {0}")] + ReadSignedBridgePool(String), + /// Error reading the Bridge pool. + #[error("Failed to read Bridge pool: {0}")] + ReadBridgePool(String), + /// Error querying transfer to Ethereum progress. + #[error("Failed to query transfer to Ethereum progress: {0}")] + TransferToEthProgress(String), + /// Error querying Ethereum voting powers. + #[error("Failed to query Ethereum voting powers: {0}")] + QueryVotingPowers(String), + /// Ethereum node timeout error. + #[error( + "Timed out while attempting to communicate with the Ethereum node" + )] + NodeTimeout, + /// Error generating Bridge pool proof. + #[error("Failed to generate Bridge pool proof: {0}")] + GenBridgePoolProof(String), + /// Error retrieving contract address. + #[error("Failed to retrieve contract address: {0}")] + RetrieveContract(String), + /// Error calculating relay cost. + #[error("Failed to calculate relay cost: {0}")] + RelayCost(String), + /// Invalid Bridge pool nonce error. 
+ #[error("The Bridge pool nonce is invalid")] + InvalidBpNonce, +} + /// Checks if the given error is an invalid viewing key pub fn is_pinned_error(err: &Result) -> bool { matches!(err, Err(Pinned(PinnedBalanceError::InvalidViewingKey))) From abb67101a7a7463b8ffeaee090fa13c31948cf05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 13 Oct 2023 07:45:12 +0200 Subject: [PATCH 088/161] core/lazy_map+set: avoid calling delete when given key is not present --- core/src/ledger/storage_api/collections/lazy_map.rs | 10 ++++++---- core/src/ledger/storage_api/collections/lazy_set.rs | 8 +++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/core/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs index 4f9aeb426d..1c644f7c88 100644 --- a/core/src/ledger/storage_api/collections/lazy_map.rs +++ b/core/src/ledger/storage_api/collections/lazy_map.rs @@ -482,16 +482,18 @@ where Ok(previous) } - /// Removes a key from the map, returning the value at the key if the key - /// was previously in the map. + /// Removes a key from the map if it's present, returning the value at the + /// key if the key was previously in the map. 
pub fn remove(&self, storage: &mut S, key: &K) -> Result> where S: StorageWrite + StorageRead, { let value = self.get(storage, key)?; - let data_key = self.get_data_key(key); - storage.delete(&data_key)?; + if value.is_some() { + let data_key = self.get_data_key(key); + storage.delete(&data_key)?; + } Ok(value) } diff --git a/core/src/ledger/storage_api/collections/lazy_set.rs b/core/src/ledger/storage_api/collections/lazy_set.rs index 038b7a87d0..9dc6747465 100644 --- a/core/src/ledger/storage_api/collections/lazy_set.rs +++ b/core/src/ledger/storage_api/collections/lazy_set.rs @@ -183,7 +183,7 @@ where storage.write(&key, ()) } - /// Removes a key from the set, returning `true` if the key + /// Removes a key from the set if it's present, returning `true` if the key /// was in the set. pub fn remove(&self, storage: &mut S, key: &K) -> Result where @@ -191,8 +191,10 @@ where { let present = self.contains(storage, key)?; - let key = self.get_key(key); - storage.delete(&key)?; + if present { + let key = self.get_key(key); + storage.delete(&key)?; + } Ok(present) } From 027a920511ba1a4a2bb7f1247d116cc42aab30ff Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 26 Sep 2023 10:20:31 +0100 Subject: [PATCH 089/161] Phase out Halt and its cousins from the SDK --- apps/src/lib/cli/api.rs | 12 +- apps/src/lib/cli/client.rs | 197 +++-------- apps/src/lib/cli/relayer.rs | 85 ++--- apps/src/lib/client/rpc.rs | 3 +- .../lib/node/ledger/shell/testing/client.rs | 11 +- sdk/src/eth_bridge/bridge_pool.rs | 305 +++++++++++------- sdk/src/eth_bridge/mod.rs | 38 ++- sdk/src/eth_bridge/validator_set.rs | 163 ++++++---- sdk/src/rpc.rs | 29 +- sdk/src/tx.rs | 10 +- 10 files changed, 407 insertions(+), 446 deletions(-) diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index 79c8be3fa9..0748e77549 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -1,6 +1,6 @@ use namada::tendermint_rpc::HttpClient; -use namada::types::control_flow::Halt; use 
namada::types::io::Io; +use namada_sdk::error::Error; use namada_sdk::queries::Client; use namada_sdk::rpc::wait_until_node_is_synched; use tendermint_config::net::Address as TendermintAddress; @@ -11,7 +11,10 @@ use crate::client::utils; #[async_trait::async_trait(?Send)] pub trait CliClient: Client + Sync { fn from_tendermint_address(address: &mut TendermintAddress) -> Self; - async fn wait_until_node_is_synced(&self, io: &impl Io) -> Halt<()>; + async fn wait_until_node_is_synced( + &self, + io: &impl Io, + ) -> Result<(), Error>; } #[async_trait::async_trait(?Send)] @@ -20,7 +23,10 @@ impl CliClient for HttpClient { HttpClient::new(utils::take_config_address(address)).unwrap() } - async fn wait_until_node_is_synced(&self, io: &impl Io) -> Halt<()> { + async fn wait_until_node_is_synced( + &self, + io: &impl Io, + ) -> Result<(), Error> { wait_until_node_is_synched(self, io).await } } diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 977442b9cb..bdb5c7e3b7 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,5 +1,4 @@ -use color_eyre::eyre::{eyre, Report, Result}; -use namada::types::control_flow::ProceedOrElse; +use color_eyre::eyre::Result; use namada::types::io::Io; use namada_sdk::tx::dump_tx; use namada_sdk::{signing, Namada, NamadaImpl}; @@ -10,10 +9,6 @@ use crate::cli::args::CliToSdk; use crate::cli::cmds::*; use crate::client::{rpc, tx, utils}; -fn error() -> Report { - eyre!("Fatal error") -} - impl CliApi { pub async fn handle_client_command( client: Option, @@ -35,10 +30,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); let dry_run = @@ -63,10 +55,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + 
client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_transfer(&namada, args).await?; @@ -77,10 +66,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_ibc_transfer(&namada, args).await?; @@ -91,10 +77,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_update_account(&namada, args).await?; @@ -105,10 +88,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); let dry_run = @@ -133,10 +113,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = NamadaImpl::native_new( &client, @@ -158,10 +135,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_init_proposal(&namada, args).await?; @@ -172,10 +146,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_vote_proposal(&namada, args).await?; @@ -186,10 +157,7 @@ 
impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_reveal_pk(&namada, args).await?; @@ -200,10 +168,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_bond(&namada, args).await?; @@ -214,10 +179,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_unbond(&namada, args).await?; @@ -228,10 +190,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_withdraw(&namada, args).await?; @@ -244,10 +203,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_validator_commission_change(&namada, args) @@ -261,10 +217,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); let tx_args = args.tx.clone(); @@ -296,10 +249,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + 
client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_unjail_validator(&namada, args).await?; @@ -312,10 +262,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_update_steward_commission(&namada, args) @@ -327,10 +274,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::submit_resign_steward(&namada, args).await?; @@ -340,10 +284,7 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let namada = ctx.to_sdk(&client, io); rpc::query_and_print_epoch(&namada).await; } @@ -353,10 +294,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_and_print_validator_state(&namada, args) @@ -368,10 +306,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_transfers(&namada, args).await; @@ -382,10 +317,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = 
args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_conversions(&namada, args).await; @@ -394,10 +326,7 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let namada = ctx.to_sdk(&client, io); rpc::query_block(&namada).await; } @@ -407,10 +336,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_balance(&namada, args).await; @@ -421,10 +347,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_bonds(&namada, args) @@ -437,10 +360,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_bonded_stake(&namada, args).await; @@ -451,10 +371,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_and_print_commission_rate(&namada, args) @@ -466,10 +383,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_slashes(&namada, 
args).await; @@ -480,10 +394,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_delegations(&namada, args).await; @@ -494,10 +405,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_find_validator(&namada, args).await; @@ -508,10 +416,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_result(&namada, args).await; @@ -522,10 +427,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_raw_bytes(&namada, args).await; @@ -536,10 +438,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_proposal(&namada, args).await; @@ -550,10 +449,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_proposal_result(&namada, args).await; @@ -566,10 +462,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - 
.wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_protocol_parameters(&namada, args).await; @@ -580,10 +473,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_pgf(&namada, args).await; @@ -594,10 +484,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::query_account(&namada, args).await; @@ -608,10 +495,7 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); tx::sign_tx(&namada, args).await?; @@ -647,10 +531,7 @@ impl CliApi { let mut ledger_address = args.ledger_address.clone(); let client = C::from_tendermint_address(&mut ledger_address); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); rpc::epoch_sleep(&namada, args).await; diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index 497c69c819..9d241f5cd0 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -1,8 +1,7 @@ use std::sync::Arc; -use color_eyre::eyre::{eyre, Report, Result}; +use color_eyre::eyre::Result; use namada::eth_bridge::ethers::providers::{Http, Provider}; -use namada::types::control_flow::ProceedOrElse; use namada::types::io::Io; use 
namada_sdk::eth_bridge::{bridge_pool, validator_set}; @@ -11,10 +10,6 @@ use crate::cli::api::{CliApi, CliClient}; use crate::cli::args::{CliToSdk, CliToSdkCtxless}; use crate::cli::cmds::*; -fn error() -> Report { - eyre!("Fatal error") -} - impl CliApi { pub async fn handle_relayer_command( client: Option, @@ -36,15 +31,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); - bridge_pool::recommend_batch(&namada, args) - .await - .proceed_or_else(error)?; + bridge_pool::recommend_batch(&namada, args).await?; } } } @@ -57,14 +47,9 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); - bridge_pool::construct_proof(&client, io, args) - .await - .proceed_or_else(error)?; + bridge_pool::construct_proof(&client, io, args).await?; } EthBridgePoolWithoutCtx::RelayProof(RelayProof(mut args)) => { let client = client.unwrap_or_else(|| { @@ -72,10 +57,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let eth_client = Arc::new( Provider::::try_from(&args.eth_rpc_endpoint) .unwrap(), @@ -84,8 +66,7 @@ impl CliApi { bridge_pool::relay_bridge_pool_proof( eth_client, &client, io, args, ) - .await - .proceed_or_else(error)?; + .await?; } EthBridgePoolWithoutCtx::QueryPool(QueryEthBridgePool( mut query, @@ -93,11 +74,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; - bridge_pool::query_bridge_pool(&client, io).await; + 
client.wait_until_node_is_synced(io).await?; + bridge_pool::query_bridge_pool(&client, io).await?; } EthBridgePoolWithoutCtx::QuerySigned( QuerySignedBridgePool(mut query), @@ -105,13 +83,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; - bridge_pool::query_signed_bridge_pool(&client, io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; + bridge_pool::query_signed_bridge_pool(&client, io).await?; } EthBridgePoolWithoutCtx::QueryRelays(QueryRelayProgress( mut query, @@ -119,11 +92,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; - bridge_pool::query_relay_progress(&client, io).await; + client.wait_until_node_is_synced(io).await?; + bridge_pool::query_relay_progress(&client, io).await?; } }, cli::NamadaRelayer::ValidatorSet(sub) => match sub { @@ -135,15 +105,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); validator_set::query_bridge_validator_set( &client, io, args, ) - .await; + .await?; } ValidatorSet::GovernanceValidatorSet( GovernanceValidatorSet(mut args), @@ -153,15 +120,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); validator_set::query_governnace_validator_set( &client, io, args, ) - .await; + .await?; } ValidatorSet::ValidatorSetProof(ValidatorSetProof( mut args, @@ -171,15 +135,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - 
.proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); validator_set::query_validator_set_update_proof( &client, io, args, ) - .await; + .await?; } ValidatorSet::ValidatorSetUpdateRelay( ValidatorSetUpdateRelay(mut args), @@ -189,10 +150,7 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced(io) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let eth_client = Arc::new( Provider::::try_from(&args.eth_rpc_endpoint) .unwrap(), @@ -201,8 +159,7 @@ impl CliApi { validator_set::relay_validator_set_update( eth_client, &client, io, args, ) - .await - .proceed_or_else(error)?; + .await?; } }, } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index b3902e0900..d7150bfa9a 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -35,7 +35,6 @@ use namada::ledger::queries::RPC; use namada::ledger::storage::ConversionState; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; use namada::types::address::{masp, Address}; -use namada::types::control_flow::ProceedOrElse; use namada::types::hash::Hash; use namada::types::io::Io; use namada::types::key::*; @@ -67,7 +66,7 @@ pub async fn query_tx_status<'a>( ) -> Event { rpc::query_tx_status(namada, status, deadline) .await - .proceed() + .unwrap() } /// Query and print the epoch of the last committed block diff --git a/apps/src/lib/node/ledger/shell/testing/client.rs b/apps/src/lib/node/ledger/shell/testing/client.rs index 7649156b8e..790587a549 100644 --- a/apps/src/lib/node/ledger/shell/testing/client.rs +++ b/apps/src/lib/node/ledger/shell/testing/client.rs @@ -1,9 +1,7 @@ -use std::ops::ControlFlow; - use clap::Command as App; use eyre::Report; -use namada::types::control_flow::Halt; use namada::types::io::Io; +use namada_sdk::error::Error as SdkError; use tendermint_config::net::Address as TendermintAddress; use super::node::MockNode; @@ -98,7 
+96,10 @@ impl<'a> CliClient for &'a MockNode { unreachable!("MockNode should always be instantiated at test start.") } - async fn wait_until_node_is_synced(&self, _io: &impl Io) -> Halt<()> { - ControlFlow::Continue(()) + async fn wait_until_node_is_synced( + &self, + _io: &impl Io, + ) -> Result<(), SdkError> { + Ok(()) } } diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index da80bdf41f..d41816658b 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -19,12 +19,12 @@ use namada_core::types::storage::Epoch; use namada_core::types::token::{Amount, DenominatedAmount}; use namada_core::types::voting_power::FractionalVotingPower; use owo_colors::OwoColorize; -use serde::{Deserialize, Serialize}; +use serde::Serialize; use super::{block_on_eth_sync, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::install_shutdown_signal; use crate::control_flow::time::{Duration, Instant}; -use crate::control_flow::{self, install_shutdown_signal, Halt, TryHalt}; -use crate::error::Error; +use crate::error::{EncodingError, Error, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::AbiDecode; use crate::io::Io; use crate::proto::Tx; @@ -100,11 +100,12 @@ pub async fn build_bridge_pool_tx<'a>( }; let tx_code_hash = - query_wasm_code_hash(context, code_path.to_str().unwrap()) - .await - .unwrap(); + query_wasm_code_hash(context, code_path.to_string_lossy()).await?; - let chain_id = tx_args.chain_id.clone().unwrap(); + let chain_id = tx_args + .chain_id + .clone() + .ok_or_else(|| Error::Other("No chain id available".into()))?; let mut tx = Tx::new(chain_id, tx_args.expiration); tx.add_code_from_hash(tx_code_hash).add_data(transfer); @@ -124,9 +125,9 @@ pub async fn build_bridge_pool_tx<'a>( /// A json serializable representation of the Ethereum /// bridge pool. 
-#[derive(Serialize, Deserialize)] -struct BridgePoolResponse { - bridge_pool_contents: HashMap, +#[derive(Serialize)] +struct BridgePoolResponse<'pool> { + bridge_pool_contents: &'pool HashMap, } /// Query the contents of the Ethereum bridge pool. @@ -134,25 +135,35 @@ struct BridgePoolResponse { pub async fn query_bridge_pool<'a>( client: &(impl Client + Sync), io: &impl Io, -) { +) -> Result, Error> { let response: Vec = RPC .shell() .eth_bridge() .read_ethereum_bridge_pool(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ReadBridgePool( + e.to_string(), + )) + })?; let pool_contents: HashMap = response .into_iter() .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { display_line!(io, "Bridge pool is empty."); - return; + return Ok(pool_contents); } let contents = BridgePoolResponse { - bridge_pool_contents: pool_contents, + bridge_pool_contents: &pool_contents, }; - display_line!(io, "{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!( + io, + "{}", + serde_json::to_string_pretty(&contents) + .map_err(|e| EncodingError::Serde(e.to_string()))? 
+ ); + Ok(pool_contents) } /// Query the contents of the Ethereum bridge pool that @@ -161,26 +172,35 @@ pub async fn query_bridge_pool<'a>( pub async fn query_signed_bridge_pool<'a>( client: &(impl Client + Sync), io: &impl Io, -) -> Halt> { +) -> Result, Error> { let response: Vec = RPC .shell() .eth_bridge() .read_signed_ethereum_bridge_pool(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ReadSignedBridgePool( + e.to_string(), + )) + })?; let pool_contents: HashMap = response .into_iter() .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { display_line!(io, "Bridge pool is empty."); - return control_flow::halt(); + return Ok(pool_contents); } let contents = BridgePoolResponse { - bridge_pool_contents: pool_contents.clone(), + bridge_pool_contents: &pool_contents, }; - display_line!(io, "{}", serde_json::to_string_pretty(&contents).unwrap()); - control_flow::proceed(pool_contents) + display_line!( + io, + "{}", + serde_json::to_string_pretty(&contents) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(pool_contents) } /// Iterates over all ethereum events @@ -191,14 +211,24 @@ pub async fn query_signed_bridge_pool<'a>( pub async fn query_relay_progress<'a>( client: &(impl Client + Sync), io: &impl Io, -) { +) -> Result<(), Error> { let resp = RPC .shell() .eth_bridge() .transfer_to_ethereum_progress(client) .await - .unwrap(); - display_line!(io, "{}", serde_json::to_string_pretty(&resp).unwrap()); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::TransferToEthProgress( + e.to_string(), + )) + })?; + display_line!( + io, + "{}", + serde_json::to_string_pretty(&resp) + .map_err(|e| EncodingError::Serde(e.to_string()))? 
+ ); + Ok(()) } /// Internal methdod to construct a proof that a set of transfers are in the @@ -207,13 +237,17 @@ async fn construct_bridge_pool_proof<'a>( client: &(impl Client + Sync), io: &impl Io, args: GenBridgePoolProofReq<'_, '_>, -) -> Halt { +) -> Result { let in_progress = RPC .shell() .eth_bridge() .transfer_to_ethereum_progress(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::TransferToEthProgress( + e.to_string(), + )) + })?; let warnings: Vec<_> = in_progress .into_iter() @@ -242,15 +276,19 @@ async fn construct_bridge_pool_proof<'a>( display!(io, "\nDo you wish to proceed? (y/n): "); io.flush(); loop { - let resp = io.read().await.try_halt(|e| { - display_line!( - io, - "Encountered error reading from STDIN: {e:?}" - ); + let resp = io.read().await.map_err(|e| { + let msg = + format!("Encountered error reading from STDIN: {e:?}"); + display_line!(io, "{msg}"); + Error::Other(msg) })?; match resp.trim() { "y" => break, - "n" => return control_flow::halt(), + "n" => { + return Err(Error::Other( + "Aborted generating Bridge pool proof".into(), + )); + } _ => { display!(io, "Expected 'y' or 'n'. Please try again: "); io.flush(); @@ -264,11 +302,15 @@ async fn construct_bridge_pool_proof<'a>( .shell() .eth_bridge() .generate_bridge_pool_proof(client, Some(data), None, false) - .await; + .await + .map_err(|e| { + display_line!(io, "Encountered error constructing proof:\n{:?}", e); + Error::EthereumBridge(EthereumBridgeError::GenBridgePoolProof( + e.to_string(), + )) + })?; - response.map(|response| response.data).try_halt(|e| { - display_line!(io, "Encountered error constructing proof:\n{:?}", e); - }) + Ok(response.data) } /// A response from construction a bridge pool proof. 
@@ -287,7 +329,7 @@ pub async fn construct_proof<'a>( client: &(impl Client + Sync), io: &impl Io, args: args::BridgePoolProof, -) -> Halt<()> { +) -> Result<(), Error> { let GenBridgePoolProofRsp { abi_encoded_args, appendices, @@ -322,8 +364,13 @@ pub async fn construct_proof<'a>( .unwrap_or_default(), abi_encoded_args, }; - display_line!(io, "{}", serde_json::to_string(&resp).unwrap()); - control_flow::proceed(()) + display_line!( + io, + "{}", + serde_json::to_string_pretty(&resp) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(()) } /// Relay a validator set update, signed off for a given epoch. @@ -332,7 +379,7 @@ pub async fn relay_bridge_pool_proof<'a, E>( client: &(impl Client + Sync), io: &impl Io, args: args::RelayBridgePoolProof, -) -> Halt<()> +) -> Result<(), Error> where E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, @@ -374,27 +421,33 @@ where let error = error.blink(); display_line!( io, - "{error}: Failed to retrieve the Ethereum Bridge smart \ - contract address from storage with \ - reason:\n{err_msg}\n\nPerhaps the Ethereum bridge is not \ - active.", + "Unable to decode the generated proof: {:?}", + error ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::RetrieveContract(err_msg.to_string()), + )); } }; let (validator_set, signatures, bp_proof): TransferToErcArgs = - AbiDecode::decode(&abi_encoded_args).try_halt(|error| { - display_line!( - io, - "Unable to decode the generated proof: {:?}", - error - ); + AbiDecode::decode(&abi_encoded_args).map_err(|error| { + let msg = + format!("Unable to decode the generated proof: {:?}", error); + display_line!(io, "{msg}"); + EncodingError::Decoding(msg) })?; // NOTE: this operation costs no gas on Ethereum - let contract_nonce = - bridge.transfer_to_erc_20_nonce().call().await.unwrap(); + let contract_nonce = bridge + .transfer_to_erc_20_nonce() + .call() + .await + .map_err(|e| { + 
Error::EthereumBridge(EthereumBridgeError::ContractCall( + e.to_string(), + )) + })?; match bp_proof.batch_nonce.cmp(&contract_nonce) { Ordering::Equal => {} @@ -410,7 +463,9 @@ where has yet to be crafted in Namada.", bp_proof.batch_nonce ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } Ordering::Greater => { let error = "Error".on_red(); @@ -423,7 +478,9 @@ where Somehow, Namada's nonce is ahead of the contract's nonce!", bp_proof.batch_nonce ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } } @@ -439,14 +496,18 @@ where relay_op.tx.set_from(eth_addr.into()); } - let pending_tx = relay_op.send().await.unwrap(); + let pending_tx = relay_op.send().await.map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ContractCall(e.to_string())) + })?; let transf_result = pending_tx .confirmations(args.confirmations as usize) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::Rpc(e.to_string())) + })?; display_line!(io, "{transf_result:?}"); - control_flow::proceed(()) + Ok(()) } mod recommendations { @@ -547,7 +608,7 @@ mod recommendations { pub async fn recommend_batch<'a>( context: &impl Namada<'a>, args: args::RecommendBatch, - ) -> Halt<()> { + ) -> Result<(), Error> { // get transfers that can already been relayed but are awaiting a quorum // of backing votes. let in_progress = RPC @@ -555,7 +616,11 @@ mod recommendations { .eth_bridge() .transfer_to_ethereum_progress(context.client()) .await - .unwrap() + .map_err(|e| { + Error::EthereumBridge( + EthereumBridgeError::TransferToEthProgress(e.to_string()), + ) + })? 
.into_keys() .map(|pending| pending.keccak256().to_string()) .collect::>(); @@ -573,19 +638,18 @@ mod recommendations { &get_signed_root_key(), ) .await - .try_halt(|err| { - edisplay_line!( - context.io(), - "Failed to query Bridge pool proof: {err}" - ); + .map_err(|err| { + let msg = + format!("Failed to query Bridge pool proof: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::Query(QueryError::General(msg)) })? .data, ) - .try_halt(|err| { - edisplay_line!( - context.io(), - "Failed to decode Bridge pool proof: {err}" - ); + .map_err(|err| { + let msg = format!("Failed to decode Bridge pool proof: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::Encode(EncodingError::Decoding(msg)) })?; // get the latest bridge pool nonce @@ -599,19 +663,18 @@ mod recommendations { &get_nonce_key(), ) .await - .try_halt(|err| { - edisplay_line!( - context.io(), - "Failed to query Bridge pool nonce: {err}" - ); + .map_err(|err| { + let msg = + format!("Failed to query Bridge pool nonce: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::Query(QueryError::General(msg)) })? 
.data, ) - .try_halt(|err| { - edisplay_line!( - context.io(), - "Failed to decode Bridge pool nonce: {err}" - ); + .map_err(|err| { + let msg = format!("Failed to decode Bridge pool nonce: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::Encode(EncodingError::Decoding(msg)) })?; if latest_bp_nonce != bp_root.data.1 { @@ -620,7 +683,9 @@ mod recommendations { "The signed Bridge pool nonce is not up to date, repeat this \ query at a later time" ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } // Get the voting powers of each of validator who signed @@ -630,7 +695,11 @@ mod recommendations { .eth_bridge() .voting_powers_at_height(context.client(), &height) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::QueryVotingPowers( + e.to_string(), + )) + })?; let valset_size = Uint::from_u64(voting_powers.len() as u64); // This is the gas cost for hashing the validator set and @@ -693,7 +762,7 @@ mod recommendations { ); }); - control_flow::proceed(()) + Ok(()) } /// Given an ordered list of signatures, figure out the size of the first @@ -720,6 +789,9 @@ mod recommendations { (*p).into(), total_power.into(), ) + // NB: this unwrap is infallible, since we calculate + // the total voting power beforehand. 
the fraction's + // value will never exceed 1.0 .unwrap(); true } else { @@ -736,7 +808,7 @@ mod recommendations { conversion_table: &HashMap, in_progress: &BTreeSet, signed_pool: HashMap, - ) -> Halt> { + ) -> Result, Error> { let mut eligible: Vec<_> = signed_pool .into_iter() .filter_map(|(pending_hash, pending)| { @@ -803,14 +875,16 @@ mod recommendations { ) }) .collect::, _>>() - .try_halt(|err| { - tracing::debug!(%err, "Failed to calculate relaying cost"); + .map_err(|err| { + let msg = format!("Failed to calculate relaying cost: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::EthereumBridge(EthereumBridgeError::RelayCost(msg)) })?; // sort transfers in increasing amounts of profitability eligible.sort_by_key(|EligibleRecommendation { cost, .. }| *cost); - control_flow::proceed(eligible) + Ok(eligible) } /// Generates the actual recommendation from restrictions given by the @@ -822,7 +896,7 @@ mod recommendations { validator_gas: Uint, max_gas: Uint, max_cost: I256, - ) -> Halt> { + ) -> Result, Error> { let mut state = AlgorithState { profitable: true, feasible_region: false, @@ -835,8 +909,10 @@ mod recommendations { }; let mut total_gas = validator_gas; - let mut total_cost = I256::try_from(validator_gas).try_halt(|err| { - tracing::debug!(%err, "Failed to convert value to I256"); + let mut total_cost = I256::try_from(validator_gas).map_err(|err| { + let msg = format!("Failed to convert value to I256: {err}"); + edisplay_line!(context.io(), "{msg}"); + Error::Encode(EncodingError::Conversion(msg)) })?; let mut total_fees = HashMap::new(); let mut recommendation = vec![]; @@ -875,23 +951,21 @@ mod recommendations { update_total_fees(&mut total_fees, transfer, conversion_table); } - control_flow::proceed( - if state.feasible_region && !recommendation.is_empty() { - Some(RecommendedBatch { - transfer_hashes: recommendation, - ethereum_gas_fees: total_gas, - net_profit: -total_cost, - bridge_pool_gas_fees: total_fees, - }) - } else { - 
display_line!( - io, - "Unable to find a recommendation satisfying the input \ - parameters." - ); - None - }, - ) + Ok(if state.feasible_region && !recommendation.is_empty() { + Some(RecommendedBatch { + transfer_hashes: recommendation, + ethereum_gas_fees: total_gas, + net_profit: -total_cost, + bridge_pool_gas_fees: total_fees, + }) + } else { + display_line!( + io, + "Unable to find a recommendation satisfying the input \ + parameters." + ); + None + }) } fn update_total_fees( @@ -917,7 +991,6 @@ mod recommendations { use namada_core::types::ethereum_events::EthAddress; use super::*; - use crate::control_flow::ProceedOrElse; use crate::io::StdIo; /// An established user address for testing & development @@ -1026,7 +1099,7 @@ mod recommendations { }); let eligible = generate_eligible(&StdIo, &table, &in_progress, signed_pool) - .proceed(); + .unwrap(); assert_eq!(eligible, expected); eligible } @@ -1124,7 +1197,7 @@ mod recommendations { uint::MAX_VALUE, I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1144,7 +1217,7 @@ mod recommendations { uint::MAX_VALUE, I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1163,7 +1236,7 @@ mod recommendations { Uint::from_u64(150_000), I256(uint::MAX_SIGNED_VALUE), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1186,7 +1259,7 @@ mod recommendations { uint::MAX_VALUE, I256::from(20_000), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1206,7 +1279,7 @@ mod recommendations { Uint::from_u64(330_000), I256::from(20_000), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1223,7 +1296,7 @@ mod recommendations { uint::MAX_VALUE, I256::from(20_000), ) - .proceed(); + .unwrap(); 
assert!(recommendation.is_none()) } @@ -1308,7 +1381,7 @@ mod recommendations { // only profitable I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed"); assert_eq!( diff --git a/sdk/src/eth_bridge/mod.rs b/sdk/src/eth_bridge/mod.rs index 49b77705a3..b8577956ca 100644 --- a/sdk/src/eth_bridge/mod.rs +++ b/sdk/src/eth_bridge/mod.rs @@ -17,9 +17,9 @@ pub use namada_ethereum_bridge::*; use num256::Uint256; use crate::control_flow::time::{ - Constant, Duration, Error as TimeoutError, Instant, LinearBackoff, Sleep, + Constant, Duration, Instant, LinearBackoff, Sleep, }; -use crate::control_flow::{self, Halt, TryHalt}; +use crate::error::{Error, EthereumBridgeError}; use crate::io::Io; use crate::{display_line, edisplay_line}; @@ -43,9 +43,7 @@ impl SyncStatus { /// Fetch the sync status of an Ethereum node. #[inline] -pub async fn eth_syncing_status( - client: &C, -) -> Result +pub async fn eth_syncing_status(client: &C) -> Result where C: Middleware, { @@ -66,7 +64,7 @@ pub async fn eth_syncing_status_timeout( client: &C, backoff_duration: Duration, deadline: Instant, -) -> Result +) -> Result where C: Middleware, { @@ -92,6 +90,7 @@ where }) }) .await + .map_err(|_| Error::EthereumBridge(EthereumBridgeError::NodeTimeout)) } /// Arguments to [`block_on_eth_sync`]. @@ -107,7 +106,7 @@ pub async fn block_on_eth_sync( client: &C, io: &IO, args: BlockOnEthSync, -) -> Halt<()> +) -> Result<(), Error> where C: Middleware, { @@ -130,14 +129,15 @@ where } }) .await - .try_halt(|_| { + .map_err(|_| { edisplay_line!( io, "Timed out while waiting for Ethereum to synchronize" ); + Error::EthereumBridge(EthereumBridgeError::NodeTimeout) })?; display_line!(io, "The Ethereum node is up to date"); - control_flow::proceed(()) + Ok(()) } /// Check if Ethereum has finished synchronizing. 
In case it has @@ -146,7 +146,7 @@ pub async fn eth_sync_or( client: &C, io: &IO, mut action: F, -) -> Halt> +) -> Result, Error> where C: Middleware, F: FnMut() -> T, @@ -154,29 +154,33 @@ where let is_synchronized = eth_syncing_status(client) .await .map(|status| status.is_synchronized()) - .try_halt(|err| { + .map_err(|err| { edisplay_line!( io, "An error occurred while fetching the Ethereum \ synchronization status: {err}" ); + err })?; if is_synchronized { - control_flow::proceed(Either::Right(())) + Ok(Either::Right(())) } else { - control_flow::proceed(Either::Left(action())) + Ok(Either::Left(action())) } } /// Check if Ethereum has finished synchronizing. In case it has /// not, end execution. -pub async fn eth_sync_or_exit(client: &C, io: &IO) -> Halt<()> +pub async fn eth_sync_or_exit( + client: &C, + io: &IO, +) -> Result<(), Error> where C: Middleware, { eth_sync_or(client, io, || { - tracing::error!("The Ethereum node has not finished synchronizing"); + edisplay_line!(io, "The Ethereum node has not finished synchronizing"); }) - .await? - .try_halt(|_| ()) + .await?; + Ok(()) } diff --git a/sdk/src/eth_bridge/validator_set.rs b/sdk/src/eth_bridge/validator_set.rs index 5c98b39ae0..82640b2102 100644 --- a/sdk/src/eth_bridge/validator_set.rs +++ b/sdk/src/eth_bridge/validator_set.rs @@ -1,6 +1,5 @@ //! Validator set updates SDK functionality. 
-use std::borrow::Cow; use std::cmp::Ordering; use std::future::Future; use std::pin::Pin; @@ -12,13 +11,18 @@ use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::{self, FutureExt}; use namada_core::hints; +use namada_core::types::eth_abi::EncodeCell; use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage::Epoch; -use namada_core::types::vote_extensions::validator_set_update::ValidatorSetArgs; +use namada_core::types::vote_extensions::validator_set_update::{ + ValidatorSetArgs, VotingPowersMap, +}; +use namada_ethereum_bridge::storage::proof::EthereumProof; use super::{block_on_eth_sync, eth_sync_or, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::install_shutdown_signal; use crate::control_flow::time::{self, Duration, Instant}; -use crate::control_flow::{self, install_shutdown_signal, Halt, TryHalt}; +use crate::error::{Error as SdkError, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::core::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; @@ -39,7 +43,7 @@ enum Error { /// `tracing` log level. WithReason { /// The reason of the error. - reason: Cow<'static, str>, + reason: SdkError, /// The log level where to display the error message. level: tracing::Level, /// If critical, exit the relayer. @@ -53,7 +57,7 @@ impl Error { /// The error is recoverable. fn recoverable(msg: M) -> Self where - M: Into>, + M: Into, { Error::WithReason { level: tracing::Level::DEBUG, @@ -67,7 +71,7 @@ impl Error { /// The error is not recoverable. fn critical(msg: M) -> Self where - M: Into>, + M: Into, { Error::WithReason { level: tracing::Level::ERROR, @@ -76,9 +80,10 @@ impl Error { } } - /// Display the error message, and return the [`Halt`] status. 
- fn handle(&self) -> Halt<()> { - let critical = match self { + /// Display the error message, and return a new [`Result`], + /// with the error already handled appropriately. + fn handle(self) -> Result<(), SdkError> { + let (critical, reason) = match self { Error::WithReason { reason, critical, @@ -89,7 +94,7 @@ impl Error { %reason, "An error occurred during the relay" ); - *critical + (critical, reason) } Error::WithReason { reason, @@ -100,18 +105,18 @@ impl Error { %reason, "An error occurred during the relay" ); - *critical + (critical, reason) } // all log levels we care about are DEBUG and ERROR _ => { hints::cold(); - return control_flow::proceed(()); + return Ok(()); } }; if hints::unlikely(critical) { - control_flow::halt() + Err(reason) } else { - control_flow::proceed(()) + Ok(()) } } } @@ -168,7 +173,7 @@ trait ShouldRelay { E::Error: std::fmt::Display; /// Try to recover from an error that has happened. - fn try_recover(err: String) -> Error; + fn try_recover>(err: E) -> Error; } impl ShouldRelay for DoNotCheckNonce { @@ -185,7 +190,7 @@ impl ShouldRelay for DoNotCheckNonce { } #[inline] - fn try_recover(err: String) -> Error { + fn try_recover>(err: E) -> Error { Error::recoverable(err) } } @@ -224,7 +229,7 @@ impl ShouldRelay for CheckNonce { } #[inline] - fn try_recover(err: String) -> Error { + fn try_recover>(err: E) -> Error { Error::critical(err) } } @@ -268,7 +273,7 @@ pub async fn query_validator_set_update_proof<'a>( client: &(impl Client + Sync), io: &impl Io, args: args::ValidatorSetProof, -) { +) -> Result>, SdkError> { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -280,9 +285,15 @@ pub async fn query_validator_set_update_proof<'a>( .eth_bridge() .read_valset_upd_proof(client, &epoch) .await - .unwrap(); + .map_err(|err| { + let msg = + format!("Failed to fetch validator set update proof: {err}"); + edisplay_line!(io, "{msg}"); + SdkError::Query(QueryError::General(msg)) + })?; display_line!(io, "0x{}", 
HEXLOWER.encode(encoded_proof.as_ref())); + Ok(encoded_proof) } /// Query an ABI encoding of the Bridge validator set at a given epoch. @@ -290,7 +301,7 @@ pub async fn query_bridge_validator_set<'a>( client: &(impl Client + Sync), io: &impl Io, args: args::BridgeValidatorSet, -) -> Halt<()> { +) -> Result { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -302,12 +313,14 @@ pub async fn query_bridge_validator_set<'a>( .eth_bridge() .read_bridge_valset(client, &epoch) .await - .try_halt(|err| { - tracing::error!(%err, "Failed to fetch Bridge validator set"); + .map_err(|err| { + let msg = format!("Failed to fetch Bridge validator set: {err}"); + edisplay_line!(io, "{msg}"); + SdkError::Query(QueryError::General(msg)) })?; - display_validator_set(io, args); - control_flow::proceed(()) + display_validator_set(io, args.clone()); + Ok(args) } /// Query an ABI encoding of the Governance validator set at a given epoch. @@ -315,7 +328,7 @@ pub async fn query_governnace_validator_set<'a>( client: &(impl Client + Sync), io: &impl Io, args: args::GovernanceValidatorSet, -) -> Halt<()> { +) -> Result { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -327,12 +340,15 @@ pub async fn query_governnace_validator_set<'a>( .eth_bridge() .read_governance_valset(client, &epoch) .await - .try_halt(|err| { - tracing::error!(%err, "Failed to fetch Governance validator set"); + .map_err(|err| { + let msg = + format!("Failed to fetch Governance validator set: {err}"); + edisplay_line!(io, "{msg}"); + SdkError::Query(QueryError::General(msg)) })?; - display_validator_set(io, args); - control_flow::proceed(()) + display_validator_set(io, args.clone()); + Ok(args) } /// Display the given [`ValidatorSetArgs`]. 
@@ -376,7 +392,7 @@ pub async fn relay_validator_set_update<'a, E>( client: &(impl Client + Sync), io: &impl Io, args: args::ValidatorSetUpdateRelay, -) -> Halt<()> +) -> Result<(), SdkError> where E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, @@ -454,8 +470,8 @@ where }, ) .await - .try_halt_or_recover(|error| error.handle()) } + .or_else(|err| err.handle()) } async fn relay_validator_set_update_daemon<'a, E, F>( @@ -464,7 +480,7 @@ async fn relay_validator_set_update_daemon<'a, E, F>( client: &(impl Client + Sync), io: &impl Io, shutdown_receiver: &mut Option, -) -> Halt<()> +) -> Result<(), Error> where E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, @@ -493,7 +509,7 @@ where }; if should_exit { - return control_flow::proceed(()); + return Ok(()); } let sleep_for = if last_call_succeeded { @@ -506,7 +522,7 @@ where time::sleep(sleep_for).await; let is_synchronizing = - eth_sync_or(&*eth_client, io, || ()).await.is_break(); + eth_sync_or(&*eth_client, io, || ()).await.is_err(); if is_synchronizing { tracing::debug!("The Ethereum node is synchronizing"); last_call_succeeded = false; @@ -516,20 +532,17 @@ where // we could be racing against governance updates, // so it is best to always fetch the latest Bridge // contract address - let bridge = get_bridge_contract(client, Arc::clone(ð_client)) - .await - .try_halt(|err| { - // only care about displaying errors, - // exit on all circumstances - _ = err.handle(); - })?; + let bridge = + get_bridge_contract(client, Arc::clone(ð_client)).await?; let bridge_epoch_prep_call = bridge.validator_set_nonce(); let bridge_epoch_fut = bridge_epoch_prep_call.call().map(|result| { result .map_err(|err| { - tracing::error!( + let msg = format!( "Failed to fetch latest validator set nonce: {err}" ); + tracing::error!("{msg}"); + Error::critical(QueryError::General(msg)) }) .map(|e| e.as_u64() as i128) }); @@ -538,16 +551,17 @@ where let nam_current_epoch_fut = shell.epoch(client).map(|result| { result 
.map_err(|err| { - tracing::error!( + let msg = format!( "Failed to fetch the latest epoch in Namada: {err}" ); + tracing::error!("{msg}"); + Error::critical(QueryError::General(msg)) }) .map(|Epoch(e)| e as i128) }); let (nam_current_epoch, gov_current_epoch) = - futures::try_join!(nam_current_epoch_fut, bridge_epoch_fut) - .try_halt(|()| ())?; + futures::try_join!(nam_current_epoch_fut, bridge_epoch_fut)?; tracing::debug!( ?nam_current_epoch, @@ -625,7 +639,11 @@ where .eth_bridge() .read_bridge_contract(nam_client) .await - .map_err(|err| Error::critical(err.to_string()))?; + .map_err(|err| { + Error::critical(EthereumBridgeError::RetrieveContract( + err.to_string(), + )) + })?; Ok(Bridge::new(bridge_contract.address, eth_client)) } @@ -648,27 +666,49 @@ where RPC.shell() .epoch(nam_client) .await - .map_err(|e| Error::critical(e.to_string()))? + .map_err(|e| Error::critical(QueryError::General(e.to_string())))? .next() }; if hints::unlikely(epoch_to_relay == Epoch(0)) { - return Err(Error::critical( - "There is no validator set update proof for epoch 0", - )); + return Err(Error::critical(SdkError::Other( + "There is no validator set update proof for epoch 0".into(), + ))); } let shell = RPC.shell().eth_bridge(); - let encoded_proof_fut = - shell.read_valset_upd_proof(nam_client, &epoch_to_relay); + let encoded_proof_fut = shell + .read_valset_upd_proof(nam_client, &epoch_to_relay) + .map(|result| { + result.map_err(|err| { + let msg = format!( + "Failed to fetch validator set update proof: {err}" + ); + SdkError::Query(QueryError::General(msg)) + }) + }); let bridge_current_epoch = epoch_to_relay - 1; let shell = RPC.shell().eth_bridge(); - let validator_set_args_fut = - shell.read_bridge_valset(nam_client, &bridge_current_epoch); + let validator_set_args_fut = shell + .read_bridge_valset(nam_client, &bridge_current_epoch) + .map(|result| { + result.map_err(|err| { + let msg = + format!("Failed to fetch Bridge validator set: {err}"); + 
SdkError::Query(QueryError::General(msg)) + }) + }); let shell = RPC.shell().eth_bridge(); - let bridge_address_fut = shell.read_bridge_contract(nam_client); + let bridge_address_fut = + shell.read_bridge_contract(nam_client).map(|result| { + result.map_err(|err| { + SdkError::EthereumBridge(EthereumBridgeError::RetrieveContract( + err.to_string(), + )) + }) + }); let (encoded_proof, validator_set_args, bridge_contract) = futures::try_join!( @@ -676,7 +716,7 @@ where validator_set_args_fut, bridge_address_fut ) - .map_err(|err| R::try_recover(err.to_string()))?; + .map_err(|err| R::try_recover(err))?; let (bridge_hash, gov_hash, signatures): ( [u8; 32], @@ -707,14 +747,15 @@ where relay_op.tx.set_from(eth_addr.into()); } - let pending_tx = relay_op - .send() - .await - .map_err(|e| Error::critical(e.to_string()))?; + let pending_tx = relay_op.send().await.map_err(|e| { + Error::critical(EthereumBridgeError::ContractCall(e.to_string())) + })?; let transf_result = pending_tx .confirmations(args.confirmations as usize) .await - .map_err(|err| Error::critical(err.to_string()))?; + .map_err(|e| { + Error::critical(EthereumBridgeError::Rpc(e.to_string())) + })?; let transf_result: R::RelayResult = transf_result.into(); let status = if transf_result.is_successful() { diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index f1267fe8a8..db1de9caab 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -30,8 +30,8 @@ use namada_proof_of_stake::types::{ use serde::Serialize; use crate::args::InputAmount; -use crate::control_flow::{time, Halt, TryHalt}; -use crate::error::{EncodingError, Error, QueryError}; +use crate::control_flow::time; +use crate::error::{EncodingError, Error, QueryError, TxError}; use crate::events::Event; use crate::io::Io; use crate::proto::Tx; @@ -52,7 +52,7 @@ pub async fn query_tx_status<'a>( context: &impl Namada<'a>, status: TxEventQuery<'_>, deadline: time::Instant, -) -> Halt { +) -> Result { time::Sleep { strategy: time::LinearBackoff { delta: 
time::Duration::from_secs(1), @@ -86,11 +86,15 @@ pub async fn query_tx_status<'a>( } }) .await - .try_halt(|_| { + .map_err(|_| { edisplay_line!( context.io(), "Transaction status query deadline of {deadline:?} exceeded" ); + match status { + TxEventQuery::Accepted(_) => Error::Tx(TxError::AcceptTimeout), + TxEventQuery::Applied(_) => Error::Tx(TxError::AppliedTimeout), + } }) } @@ -978,7 +982,7 @@ pub async fn validate_amount<'a, N: Namada<'a>>( pub async fn wait_until_node_is_synched<'a>( client: &(impl Client + Sync), io: &impl Io, -) -> Halt<()> { +) -> Result<(), Error> { let height_one = Height::try_from(1_u64).unwrap(); let try_count = Cell::new(1_u64); const MAX_TRIES: usize = 5; @@ -1014,25 +1018,22 @@ pub async fn wait_until_node_is_synched<'a>( ControlFlow::Continue(()) } Err(e) => { - edisplay_line!( - io, - "Failed to query node status with error: {}", - e - ); - ControlFlow::Break(Err(())) + let msg = + format!("Failed to query node status with error: {e}"); + edisplay_line!(io, "{msg}"); + ControlFlow::Break(Err(Error::Query(QueryError::General(msg)))) } } }) .await // maybe time out - .try_halt(|_| { + .map_err(|_| { display_line!( io, "Node is still catching up, wait for it to finish synching." ); + Error::Query(QueryError::CatchingUp) })? 
- // error querying rpc - .try_halt(|_| ()) } /// Look up the denomination of a token in order to make a correctly denominated diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index acfe12f6bd..00402aa46e 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -50,7 +50,7 @@ use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use crate::args::{self, InputAmount}; -use crate::control_flow::{time, ProceedOrElse}; +use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxError}; use crate::io::Io; use crate::masp::TransferErr::Build; @@ -374,9 +374,8 @@ pub async fn submit_tx<'a>( let parsed = { let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); - let event = rpc::query_tx_status(context, wrapper_query, deadline) - .await - .proceed_or(TxError::AcceptTimeout)?; + let event = + rpc::query_tx_status(context, wrapper_query, deadline).await?; let parsed = TxResponse::from_event(event); let tx_to_str = |parsed| { serde_json::to_string_pretty(parsed).map_err(|err| { @@ -397,8 +396,7 @@ pub async fn submit_tx<'a>( rpc::TxEventQuery::Applied(decrypted_hash.as_str()); let event = rpc::query_tx_status(context, decrypted_query, deadline) - .await - .proceed_or(TxError::AppliedTimeout)?; + .await?; let parsed = TxResponse::from_event(event); display_line!( context.io(), From cff47a277d5c26deaa0a609adf3756b66926e8d5 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Wed, 27 Sep 2023 16:07:48 +0100 Subject: [PATCH 090/161] Remove halt abstraction from the SDK --- sdk/src/control_flow/mod.rs | 104 ------------------------------------ 1 file changed, 104 deletions(-) diff --git a/sdk/src/control_flow/mod.rs b/sdk/src/control_flow/mod.rs index 6b7d07532d..9b75b6e921 100644 --- a/sdk/src/control_flow/mod.rs +++ b/sdk/src/control_flow/mod.rs @@ -3,7 +3,6 @@ pub mod time; use std::future::Future; -use std::ops::ControlFlow; use std::pin::Pin; use std::task::{Context, Poll}; @@ 
-12,109 +11,6 @@ use futures::future::FutureExt; #[cfg(any(unix, windows))] use tokio::sync::oneshot; -/// A [`ControlFlow`] to control the halt status -/// of some execution context. -/// -/// No return values are assumed to exist. -pub type Halt = ControlFlow<(), T>; - -/// Halt all execution. -pub const fn halt() -> Halt { - ControlFlow::Break(()) -} - -/// Proceed execution. -pub const fn proceed(value: T) -> Halt { - ControlFlow::Continue(value) -} - -/// Convert from [`Halt`] to [`Result`]. -#[allow(missing_docs)] -pub trait ProceedOrElse { - fn proceed_or_else(self, error: F) -> Result - where - Self: Sized, - F: FnOnce() -> E; - - #[inline] - fn proceed_or(self, error: E) -> Result - where - Self: Sized, - { - self.proceed_or_else(move || error) - } - - #[inline] - fn proceed(self) -> T - where - Self: Sized, - { - self.proceed_or(()).expect("Halted execution") - } -} - -impl ProceedOrElse for Halt { - #[inline] - fn proceed_or_else(self, error: F) -> Result - where - Self: Sized, - F: FnOnce() -> E, - { - match self { - ControlFlow::Continue(x) => Ok(x), - ControlFlow::Break(()) => Err(error()), - } - } -} - -/// Halting abstraction to obtain [`ControlFlow`] actions. -pub trait TryHalt { - /// Possibly exit from some context, if we encounter an - /// error. We may recover from said error. - fn try_halt_or_recover(self, handle_err: F) -> Halt - where - F: FnMut(E) -> Halt; - - /// Exit from some context, if we encounter an error. 
- #[inline] - fn try_halt(self, mut handle_err: F) -> Halt - where - Self: Sized, - F: FnMut(E), - { - self.try_halt_or_recover(|e| { - handle_err(e); - halt() - }) - } -} - -impl TryHalt for Result { - #[inline] - fn try_halt_or_recover(self, mut handle_err: F) -> Halt - where - F: FnMut(E) -> Halt, - { - match self { - Ok(x) => proceed(x), - Err(e) => handle_err(e), - } - } -} - -impl TryHalt for itertools::Either { - #[inline] - fn try_halt_or_recover(self, mut handle_err: F) -> Halt - where - F: FnMut(L) -> Halt, - { - match self { - itertools::Either::Right(x) => proceed(x), - itertools::Either::Left(e) => handle_err(e), - } - } -} - /// A shutdown signal receiver. pub struct ShutdownSignal { #[cfg(not(any(unix, windows)))] From e986bcf8792964976da2615e712c3afee94ffe59 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 29 Sep 2023 10:28:38 +0100 Subject: [PATCH 091/161] Apply suggestions from code review Co-authored-by: Tomas Zemanovic --- sdk/src/eth_bridge/bridge_pool.rs | 16 +++++++++++----- sdk/src/rpc.rs | 2 +- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index d41816658b..61246f3b4c 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -35,7 +35,9 @@ use crate::queries::{ use crate::rpc::{query_wasm_code_hash, validate_amount}; use crate::signing::aux_signing_data; use crate::tx::prepare_tx; -use crate::{args, display, display_line, Namada, SigningTxData}; +use crate::{ + args, display, display_line, edisplay_line, Namada, SigningTxData, +}; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. 
pub async fn build_bridge_pool_tx<'a>( @@ -279,7 +281,7 @@ async fn construct_bridge_pool_proof<'a>( let resp = io.read().await.map_err(|e| { let msg = format!("Encountered error reading from STDIN: {e:?}"); - display_line!(io, "{msg}"); + edisplay_line!(io, "{msg}"); Error::Other(msg) })?; match resp.trim() { @@ -304,7 +306,11 @@ async fn construct_bridge_pool_proof<'a>( .generate_bridge_pool_proof(client, Some(data), None, false) .await .map_err(|e| { - display_line!(io, "Encountered error constructing proof:\n{:?}", e); + edisplay_line!( + io, + "Encountered error constructing proof:\n{:?}", + e + ); Error::EthereumBridge(EthereumBridgeError::GenBridgePoolProof( e.to_string(), )) @@ -434,7 +440,7 @@ where AbiDecode::decode(&abi_encoded_args).map_err(|error| { let msg = format!("Unable to decode the generated proof: {:?}", error); - display_line!(io, "{msg}"); + edisplay_line!(io, "{msg}"); EncodingError::Decoding(msg) })?; @@ -959,7 +965,7 @@ mod recommendations { bridge_pool_gas_fees: total_fees, }) } else { - display_line!( + edisplay_line!( io, "Unable to find a recommendation satisfying the input \ parameters." diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index db1de9caab..877d4c328e 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -1028,7 +1028,7 @@ pub async fn wait_until_node_is_synched<'a>( .await // maybe time out .map_err(|_| { - display_line!( + edisplay_line!( io, "Node is still catching up, wait for it to finish synching." 
); From 1831e8e02939ad4ad382e7b4a314f6f1f265613c Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 9 Oct 2023 10:32:05 +0100 Subject: [PATCH 092/161] Internal macros SDK module --- sdk/src/internal_macros.rs | 17 +++++++++++++++++ sdk/src/lib.rs | 3 +++ 2 files changed, 20 insertions(+) create mode 100644 sdk/src/internal_macros.rs diff --git a/sdk/src/internal_macros.rs b/sdk/src/internal_macros.rs new file mode 100644 index 0000000000..b864faa948 --- /dev/null +++ b/sdk/src/internal_macros.rs @@ -0,0 +1,17 @@ +macro_rules! echo_error { + ($io:expr, $($arg:tt)*) => {{ + let msg = ::alloc::format!($($arg)*); + $crate::edisplay_line!($io, "{msg}"); + msg + }} +} + +macro_rules! trace_error { + ($level:ident, $($arg:tt)*) => {{ + let msg = ::alloc::format!($($arg)*); + ::tracing::$level!("{msg}"); + msg + }} +} + +pub(crate) use {echo_error, trace_error}; diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 622a63a1d1..9fbdb56dc1 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -1,3 +1,5 @@ +extern crate alloc; + pub use namada_core::proto; #[cfg(feature = "tendermint-rpc")] pub use tendermint_rpc; @@ -29,6 +31,7 @@ pub mod tx; pub mod control_flow; pub mod error; pub mod events; +pub(crate) mod internal_macros; pub mod io; pub mod queries; pub mod wallet; From f338ae3768e38105e31a7e1c0de827dd0be11730 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 9 Oct 2023 10:43:03 +0100 Subject: [PATCH 093/161] Echo SDK errors to stdout --- sdk/src/eth_bridge/bridge_pool.rs | 61 ++++++++++++++++------------- sdk/src/eth_bridge/validator_set.rs | 38 +++++++++--------- sdk/src/rpc.rs | 13 +++--- 3 files changed, 59 insertions(+), 53 deletions(-) diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index 61246f3b4c..4a1664a8e8 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -26,6 +26,7 @@ use crate::control_flow::install_shutdown_signal; use crate::control_flow::time::{Duration, Instant}; use 
crate::error::{EncodingError, Error, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::AbiDecode; +use crate::internal_macros::echo_error; use crate::io::Io; use crate::proto::Tx; use crate::queries::{ @@ -279,10 +280,10 @@ async fn construct_bridge_pool_proof<'a>( io.flush(); loop { let resp = io.read().await.map_err(|e| { - let msg = - format!("Encountered error reading from STDIN: {e:?}"); - edisplay_line!(io, "{msg}"); - Error::Other(msg) + Error::Other(echo_error!( + io, + "Encountered error reading from STDIN: {e:?}" + )) })?; match resp.trim() { "y" => break, @@ -438,10 +439,11 @@ where let (validator_set, signatures, bp_proof): TransferToErcArgs = AbiDecode::decode(&abi_encoded_args).map_err(|error| { - let msg = - format!("Unable to decode the generated proof: {:?}", error); - edisplay_line!(io, "{msg}"); - EncodingError::Decoding(msg) + EncodingError::Decoding(echo_error!( + io, + "Unable to decode the generated proof: {:?}", + error + )) })?; // NOTE: this operation costs no gas on Ethereum @@ -645,17 +647,18 @@ mod recommendations { ) .await .map_err(|err| { - let msg = - format!("Failed to query Bridge pool proof: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::Query(QueryError::General(msg)) + Error::Query(QueryError::General(echo_error!( + context.io(), + "Failed to query Bridge pool proof: {err}" + ))) })? 
.data, ) .map_err(|err| { - let msg = format!("Failed to decode Bridge pool proof: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::Encode(EncodingError::Decoding(msg)) + Error::Encode(EncodingError::Decoding(echo_error!( + context.io(), + "Failed to decode Bridge pool proof: {err}" + ))) })?; // get the latest bridge pool nonce @@ -670,17 +673,18 @@ mod recommendations { ) .await .map_err(|err| { - let msg = - format!("Failed to query Bridge pool nonce: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::Query(QueryError::General(msg)) + Error::Query(QueryError::General(echo_error!( + context.io(), + "Failed to query Bridge pool nonce: {err}" + ))) })? .data, ) .map_err(|err| { - let msg = format!("Failed to decode Bridge pool nonce: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::Encode(EncodingError::Decoding(msg)) + Error::Encode(EncodingError::Decoding(echo_error!( + context.io(), + "Failed to decode Bridge pool nonce: {err}" + ))) })?; if latest_bp_nonce != bp_root.data.1 { @@ -882,9 +886,9 @@ mod recommendations { }) .collect::, _>>() .map_err(|err| { - let msg = format!("Failed to calculate relaying cost: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::EthereumBridge(EthereumBridgeError::RelayCost(msg)) + Error::EthereumBridge(EthereumBridgeError::RelayCost( + echo_error!(io, "Failed to calculate relaying cost: {err}"), + )) })?; // sort transfers in increasing amounts of profitability @@ -916,9 +920,10 @@ mod recommendations { let mut total_gas = validator_gas; let mut total_cost = I256::try_from(validator_gas).map_err(|err| { - let msg = format!("Failed to convert value to I256: {err}"); - edisplay_line!(context.io(), "{msg}"); - Error::Encode(EncodingError::Conversion(msg)) + Error::Encode(EncodingError::Conversion(echo_error!( + io, + "Failed to convert value to I256: {err}" + ))) })?; let mut total_fees = HashMap::new(); let mut recommendation = vec![]; diff --git a/sdk/src/eth_bridge/validator_set.rs 
b/sdk/src/eth_bridge/validator_set.rs index 82640b2102..1b7a77466d 100644 --- a/sdk/src/eth_bridge/validator_set.rs +++ b/sdk/src/eth_bridge/validator_set.rs @@ -26,6 +26,7 @@ use crate::error::{Error as SdkError, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::core::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; +use crate::internal_macros::{echo_error, trace_error}; use crate::io::Io; use crate::queries::{Client, RPC}; use crate::{args, display_line, edisplay_line}; @@ -286,10 +287,10 @@ pub async fn query_validator_set_update_proof<'a>( .read_valset_upd_proof(client, &epoch) .await .map_err(|err| { - let msg = - format!("Failed to fetch validator set update proof: {err}"); - edisplay_line!(io, "{msg}"); - SdkError::Query(QueryError::General(msg)) + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch validator set update proof: {err}" + ))) })?; display_line!(io, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); @@ -314,9 +315,10 @@ pub async fn query_bridge_validator_set<'a>( .read_bridge_valset(client, &epoch) .await .map_err(|err| { - let msg = format!("Failed to fetch Bridge validator set: {err}"); - edisplay_line!(io, "{msg}"); - SdkError::Query(QueryError::General(msg)) + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch Bridge validator set: {err}" + ))) })?; display_validator_set(io, args.clone()); @@ -341,10 +343,10 @@ pub async fn query_governnace_validator_set<'a>( .read_governance_valset(client, &epoch) .await .map_err(|err| { - let msg = - format!("Failed to fetch Governance validator set: {err}"); - edisplay_line!(io, "{msg}"); - SdkError::Query(QueryError::General(msg)) + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch Governance validator set: {err}" + ))) })?; display_validator_set(io, args.clone()); @@ -538,11 +540,10 @@ where let bridge_epoch_fut = 
bridge_epoch_prep_call.call().map(|result| { result .map_err(|err| { - let msg = format!( + Error::critical(QueryError::General(trace_error!( + error, "Failed to fetch latest validator set nonce: {err}" - ); - tracing::error!("{msg}"); - Error::critical(QueryError::General(msg)) + ))) }) .map(|e| e.as_u64() as i128) }); @@ -551,11 +552,10 @@ where let nam_current_epoch_fut = shell.epoch(client).map(|result| { result .map_err(|err| { - let msg = format!( + Error::critical(QueryError::General(trace_error!( + error, "Failed to fetch the latest epoch in Namada: {err}" - ); - tracing::error!("{msg}"); - Error::critical(QueryError::General(msg)) + ))) }) .map(|Epoch(e)| e as i128) }); diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index 877d4c328e..f302574aca 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -33,6 +33,7 @@ use crate::args::InputAmount; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, TxError}; use crate::events::Event; +use crate::internal_macros::echo_error; use crate::io::Io; use crate::proto::Tx; use crate::queries::vp::pos::EnrichedBondsAndUnbondsDetails; @@ -1017,12 +1018,12 @@ pub async fn wait_until_node_is_synched<'a>( try_count.set(try_count.get() + 1); ControlFlow::Continue(()) } - Err(e) => { - let msg = - format!("Failed to query node status with error: {e}"); - edisplay_line!(io, "{msg}"); - ControlFlow::Break(Err(Error::Query(QueryError::General(msg)))) - } + Err(e) => ControlFlow::Break(Err(Error::Query( + QueryError::General(echo_error!( + io, + "Failed to query node status with error: {e}" + )), + ))), } }) .await From af1fa0e186c611e0c3c9b9e99c3217251252241f Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 10:24:46 +0100 Subject: [PATCH 094/161] Changelog for #1953 --- .changelog/unreleased/SDK/1953-phase-out-try-halt.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/SDK/1953-phase-out-try-halt.md diff --git 
a/.changelog/unreleased/SDK/1953-phase-out-try-halt.md b/.changelog/unreleased/SDK/1953-phase-out-try-halt.md new file mode 100644 index 0000000000..48280ecb2f --- /dev/null +++ b/.changelog/unreleased/SDK/1953-phase-out-try-halt.md @@ -0,0 +1,2 @@ +- Phase out Halt abstractions + ([\#1953](https://github.com/anoma/namada/pull/1953)) \ No newline at end of file From 79af00ed2f23d5d0300d776b5f56b05152a5df9f Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Fri, 13 Oct 2023 09:37:17 +0200 Subject: [PATCH 095/161] Implemented compounding inflation for native tokens. --- core/src/ledger/storage/masp_conversions.rs | 126 ++++++++++++++---- ...3AC0E46C83B093EAF35D1A0537CE81D282FB9.bin} | Bin 16958 -> 16958 bytes ...9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin | Bin 7448 -> 7448 bytes ...D76149D3088F539CF8372D404609B89B095EF7.bin | Bin 7448 -> 7448 bytes ...062D269F657017C578484081762FB65D9D52E.bin} | Bin 9184 -> 9184 bytes ...BD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin} | Bin 9184 -> 9184 bytes ...7C98D1E5AAAA9988F26B1A47090ACCE693572F.bin | Bin 7448 -> 7448 bytes ...69989A13906D683BC96E27EF50FC037156E25.bin} | Bin 9589 -> 9589 bytes ...E9FF2DA066496E6664F56EB28F67D75C21911.bin} | Bin 9589 -> 9589 bytes ...55E2583731052033322E25250C780EE322BF4.bin} | Bin 18732 -> 18732 bytes ...CE2E0F12C98370D2CDFD6A75236522A4235F5.bin} | Bin 24899 -> 24899 bytes ...905E9DAFDAC88A291E7F1756931C8A85441E6.bin} | Bin 19839 -> 19839 bytes ...8DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin | Bin 7448 -> 7448 bytes ...344FFFAA6CA273027CD480AEA68DDED57D88CA.bin | Bin 7448 -> 7448 bytes 14 files changed, 100 insertions(+), 26 deletions(-) rename test_fixtures/masp_proofs/{72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin => 1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin} (64%) rename test_fixtures/masp_proofs/{574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin => 47AAF805508239C602AD831876B062D269F657017C578484081762FB65D9D52E.bin} (70%) 
rename test_fixtures/masp_proofs/{DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin => 85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin} (66%) rename test_fixtures/masp_proofs/{B94B8EDBFA5038FFB9D439D316EBD09A62AAF19015EF8149D6502B6C0FA871C4.bin => 8D37EB2E5C3BD60B88B1257DFF869989A13906D683BC96E27EF50FC037156E25.bin} (64%) rename test_fixtures/masp_proofs/{434F17129C496E5DE034C4F2553E767C0E74D457A68F8606AFBF41E6F2F56D6E.bin => 992543B7B7B6B9DCB328590D37CE9FF2DA066496E6664F56EB28F67D75C21911.bin} (65%) rename test_fixtures/masp_proofs/{B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin => DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin} (67%) rename test_fixtures/masp_proofs/{0B436B9FB574776E45EDA537DCF334701D250179C310C7076DAEE5367CB3D74A.bin => DFDFB1EDE901241995311122C25CE2E0F12C98370D2CDFD6A75236522A4235F5.bin} (67%) rename test_fixtures/masp_proofs/{C82CD3AD0DACE8091365CB0D91AE73F7B5BC1D64F787FA8A7985D301776103DD.bin => ED30921582F7DCEA42D960F73DE905E9DAFDAC88A291E7F1756931C8A85441E6.bin} (61%) diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 624fe2aa1f..f5d7553516 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -15,8 +15,8 @@ use crate::types::token::MaspDenom; /// A representation of the conversion state #[derive(Debug, Default, BorshSerialize, BorshDeserialize)] pub struct ConversionState { - /// The merkle root from the previous epoch - pub prev_root: Node, + /// The last amount of the native token distributed + pub normed_inflation: Option, /// The tree currently containing all the conversions pub tree: FrozenCommitmentTree, /// Map assets to their latest conversion and position in Merkle tree @@ -37,6 +37,8 @@ where D: 'static + super::DB + for<'iter> super::DBIter<'iter>, H: 'static + super::StorageHasher, { + use std::cmp::Ordering; + use 
masp_primitives::ff::PrimeField; use masp_primitives::transaction::components::I32Sum as MaspAmount; use rayon::iter::{ @@ -53,29 +55,41 @@ where let key_prefix: storage::Key = masp_addr.to_db_key().into(); let masp_rewards = address::masp_rewards(); + let mut masp_reward_keys: Vec<_> = masp_rewards.keys().collect(); + // Put the native rewards first because other inflation computations depend + // on it + masp_reward_keys.sort_unstable_by(|x, y| { + if (**x == address::nam()) == (**y == address::nam()) { + Ordering::Equal + } else if **x == address::nam() { + Ordering::Less + } else { + Ordering::Greater + } + }); // The total transparent value of the rewards being distributed let mut total_reward = token::Amount::native_whole(0); - // Construct MASP asset type for rewards. Always timestamp reward tokens - // with the zeroth epoch to minimize the number of convert notes clients - // have to use. This trick works under the assumption that reward tokens - // from different epochs are exactly equivalent. - let reward_asset = - encode_asset_type(address::nam(), MaspDenom::Zero, Epoch(0)); + // Construct MASP asset type for rewards. Always deflate and timestamp + // reward tokens with the zeroth epoch to minimize the number of convert + // notes clients have to use. This trick works under the assumption that + // reward tokens will then be reinflated back to the current epoch. 
+ let reward_assets = [ + encode_asset_type(address::nam(), MaspDenom::Zero, Epoch(0)), + encode_asset_type(address::nam(), MaspDenom::One, Epoch(0)), + encode_asset_type(address::nam(), MaspDenom::Two, Epoch(0)), + encode_asset_type(address::nam(), MaspDenom::Three, Epoch(0)), + ]; // Conversions from the previous to current asset for each address let mut current_convs = BTreeMap::<(Address, MaspDenom), AllowedConversion>::new(); // Reward all tokens according to above reward rates - for (addr, reward) in &masp_rewards { + for addr in masp_reward_keys { + let reward = masp_rewards[addr]; // Dispense a transparent reward in parallel to the shielded rewards let addr_bal: token::Amount = wl_storage .read(&token::balance_key(addr, &masp_addr))? .unwrap_or_default(); - // The reward for each reward.1 units of the current asset is - // reward.0 units of the reward token - // Since floor(a) + floor(b) <= floor(a+b), there will always be - // enough rewards to reimburse users - total_reward += (addr_bal * *reward).0; for denom in token::MaspDenom::iter() { // Provide an allowed conversion from previous timestamp. 
The // negative sign allows each instance of the old asset to be @@ -90,15 +104,80 @@ where denom, wl_storage.storage.block.epoch, ); - current_convs.insert( - (addr.clone(), denom), - (MaspAmount::from_pair(old_asset, -(reward.1 as i32)).unwrap() - + MaspAmount::from_pair(new_asset, reward.1 as i32) + // Native token inflation values are always with respect to this + let ref_inflation = masp_rewards[&address::nam()].1; + // Get the last rewarded amount of the native token + let normed_inflation = wl_storage + .storage + .conversion_state + .normed_inflation + .get_or_insert(ref_inflation); + if *addr == address::nam() { + // The amount that will be given of the new native token for + // every amount of the native token given in the + // previous epoch + let new_normed_inflation = *normed_inflation + + (*normed_inflation * reward.0) / reward.1; + // The conversion is computed such that if consecutive + // conversions are added together, the + // intermediate native tokens cancel/ + // telescope out + current_convs.insert( + (addr.clone(), denom), + (MaspAmount::from_pair( + old_asset, + -(*normed_inflation as i32), + ) + .unwrap() + + MaspAmount::from_pair( + new_asset, + new_normed_inflation as i32, + ) + .unwrap()) + .into(), + ); + // Operations that happen exactly once for each token + if denom == MaspDenom::Three { + // The reward for each reward.1 units of the current asset + // is reward.0 units of the reward token + total_reward += (addr_bal + * (new_normed_inflation, *normed_inflation)) + .0 + - addr_bal; + // Save the new normed inflation + *normed_inflation = new_normed_inflation; + } + } else { + // Express the inflation reward in real terms, that is, with + // respect to the native asset in the zeroth + // epoch + let real_reward = + (reward.0 * ref_inflation) / *normed_inflation; + // The conversion is computed such that if consecutive + // conversions are added together, the + // intermediate tokens cancel/ telescope out + current_convs.insert( + 
(addr.clone(), denom), + (MaspAmount::from_pair(old_asset, -(reward.1 as i32)) .unwrap() - + MaspAmount::from_pair(reward_asset, reward.0 as i32) + + MaspAmount::from_pair(new_asset, reward.1 as i32) + .unwrap() + + MaspAmount::from_pair( + reward_assets[denom as usize], + real_reward as i32, + ) .unwrap()) - .into(), - ); + .into(), + ); + // Operations that happen exactly once for each token + if denom == MaspDenom::Three { + // The reward for each reward.1 units of the current asset + // is reward.0 units of the reward token + total_reward += ((addr_bal * (real_reward, reward.1)).0 + * (*normed_inflation, ref_inflation)) + .0; + } + } // Add a conversion from the previous asset type wl_storage.storage.conversion_state.assets.insert( old_asset, @@ -163,11 +242,6 @@ where .map(FrozenCommitmentTree::new) .collect(); - // Keep the merkle root from the old tree for transactions constructed - // close to the epoch boundary - wl_storage.storage.conversion_state.prev_root = - wl_storage.storage.conversion_state.tree.root(); - // Convert conversion vector into tree so that Merkle paths can be // obtained wl_storage.storage.conversion_state.tree = diff --git a/test_fixtures/masp_proofs/72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin b/test_fixtures/masp_proofs/1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin similarity index 64% rename from test_fixtures/masp_proofs/72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin rename to test_fixtures/masp_proofs/1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin index fda6b2648ea391bc3c537ce65b68f32984192478..1764192dbaa796b80a90fc4d76da3a085cc69cb1 100644 GIT binary patch delta 3690 zcmb`{S2!CEpapPy$KI4!Rbp1`+Cr2dwMS#`P3#dx?Y&Bk*rRq)TDz#)TT!z{DYar# z?*HMw-MKqE1Yw94Idl8`>`)b^6~Z!)a`kiN18XTK>2PZY?cBoVuy zJ^wg3WuDbAwco_9NrrD}Jkwl_#`T(Sa3%!C(hB?hYYJf}!sU-xJS*%EWJ5}G4vV=> z9X)TFAS!tcq{-t_`T8n3t~*g5Mmh75Vx;^EifjGT?^|*{IVzuu6AGl%bnJ5QG8fBR 
zHih|OiKeZ@J^JMrzL)!JLFMapQp*Z_Fm~7l$W9fO*lz_ik zYpE##?J1-hV;*uWLOq|3jq72DQEWbP8rFO<%=)sl_=NIN9f-ccWC8Q($H&^)ymdDq z=clmiut9?TW){kP-2aCW%+Yfa8Pzg4qjC>JRFw-0_~er}6tPzhey`-wnL!8yQ2a#C zL6-4@<+H9Jj64hkG#YdUG|Ydf7@_%njH*o{)Bz?}1-+(gwONl;omu^ZxNX(+Zy++M z+@u}wKZJvdJXC<8cy@;@$42k7HleImL6;9}YMpdV&IfjviC+Ll#EAmHdV2vWA)6c1 z(}&nRem}04`>cyUoqNgDqm%+>mI!W5fO8;<#o6n3AYB$HFMAQ@K7I6)c4ZJhaYlJ; zaj`6;C2n&!Ee92FgSIO!gY?gh&kU?h*NedS`O!D9 z4P6kxBzSmbm@7&4w5*u8Ybi%N%o(&)W{1p+&pu zN$oVLBbnxxz3CTM0sEw`aTTz3@Y7{i80+#?B;rN&ki12Wxm5ku z-j;QHWp>yd@t!7C|9HCdr5axh)gD~&_j*-GuX(0k7&Ao9V@Rz}yWBvyqFtcwIi4&9XZ#VO0;vk5(GmJ<9A;oZ~{G;i~UgXz4OO1779 zOz~BUS!;IykTK4acFtM%2ZSiM*icO6t@!{>HrUW8P z@Jm}@Sg*oG2wN8|8p+D9?Z`X%i+Ap2+u(5*)OknNYfK* z4X`gE*G(4DtR?av;`%A2RhP~n5+xZi?M%FgdZF_Je}~`0)`cKgPZD? z+em5iL+xxUsMRdrk*MKk*Cq+ju>6=wzzmvStTI^dvgUcDjzgW0y|(mCMNbg`qi`@g zFZn=-nBi%3*P$_JUlfhuu(+bZ9UcQ#ulgNtknFIOal^`*Eyeyrv~N^yewNnY8rC_^ zBpMmi+Xb!}PSwLL)S$HU0|W(5Vlmx&YG!?+V23z%#hNRjue&{an?%Hm&HrpaSRVbP zaxp|GR5;jU*SS?C-gb&*HnC?#kl3nG-c)chFe7-$+ujHa2zPHM%;A1ZVYVq#T{;t6 znbhKwQQl_mZkiHnMoMJiz(1!27O?*y{Zly*1w2lvua2m=;l)PwL41dOB5CYf$gdTG zq)9)Uq)_tIB;Rn?1PJ2P=Xb$V^q2NkgixyxmrO(D65QBAq1ou=wYb%@GORQzAnN~m z_aes%3V?A(JJ#*Pn|4AB=9FEB!kZ?(7|Po{(bci-#)D}`iVMvH=A$1eEiDB-!jsjx zm9YHmIz9ma<&Z9Y$&1-p*L-GlrW5>*i@q3=!|mRL9QMz{kCI58x=TmxQ*MRhDJ=IL z{i#De5e)z{B)xka!tWHmqDGBKbJYtA`c@{?%UshZolpJp@xc=aXl?Dlh*EAHp^VMN zk%1f>M6a9cMmV^0oEsp4l{7@{NGSL|u}{W>ZY_$vCF$LZq_t8|E3G0{50R#Hw0-Qd z$(QSTB+!9$z`tfo1s9b=8c^_Lm;w~!`q_Bu<)~TPR&K_sy4T#EeMJpi$i+c96mg0l zO}%VJGf^bTxvGI~<4a&J=dRinK4P97^jtLRAus*|9Ht3v&M6K1LAYFf78(M?hH?lQ zMZs*-tPMz<(dVx&G!?GJjhwR_-6n8-5=`6~I}AiiC$B!Z@}QVhADEh4RJAWYf$j(b zJ7CO*!CEgSY^$dv{OL9i+W(wd29xHEh9H<7{G2e2B1Xm?;x);fkq+FCZSRAc!X1O@ zagw2m*dP0XWNXjOe*ZW!SXgy8=l=jIIej|ktm=$7CXF11UY*86L&Xs)PqoQ_0F(-XsOB~nQdlvi#Ky=K==9v@ z>*1^}yBiY*QLkW*h;*y1ZdtdZTH5aF;NHvz;Xb)8*S@WvE=qPug0deRqtK`kk*e(# z;#R8T<3`ySYYH=beA;#LOe<2F?4lzq%Ik^D)WMt+T+iSRH*Y(CiErWC{Q}ovrtE0q 
zLe=lTP?MDVR&F*khQy(sjuX6KLXiaa-YT9*vJ;>Bk$I+0-sw9g?00!pl=L`s`{UiG9|h0{TSn`$ep?Kik}hru!QuF zsj?G&iHbLQ-I{omTGQBo@+*b_+Kz;qJI}-7EgNn!%qvD^lF^3a$}}@q@+$nIqI;D} zN3yiytf}4wBHH-5czCDBb?zuaw=jgso#r$JcetL~c+|llS$NKa@H0YZirAQiw`8RZ z<+v_-5K&HmPeN^aXO9$Ay{sENx2QO{jB6vI=D(G*aM8E;OK|DWCxb2yu(;u&tse>K zO!#&6J>BQ)U~phh&Js9lAg~s5H1WJt)DaL>D^+t+Qf(dwy^aXk0p~8L6MrRmvcyHh z5z9XGvjCT?xNv7l6JXOA61?}`R=%knC1;))>UTLp`lyI!MB)Of0clRAIFGD}=(NLE z(y6mqoc4D8-dh(a7G;FwY>F7>+O9HSj1m&D(JUJf`qbOX_wcSW0g4&B97fk}kHY=) z6!k?3b)5WT(BMLTYOyyXczoxKi|G$Mgl%ouXjM98*V;*&)wK5CV=~P8qb~$yF8}7O zZM=`~t&V(x_6YhN)^riEmX%6-E?%Rvq_)2&t3u(F2HRDJ`_meT^1vGZ5)(ER1I)pJ zwnKk0>H65+Hx{a&;7_DIG|;x3BXQXSo`CI{#S&#{02;pbL#CP6^%s8TL7lwpz6mT}JHq3_a?%PV8mPKfGPDU-I_N99aAwx&XuM_QUE*B3I4XR#*qS`>nt zwZC)z;_XwkP95Y-x@h<5k3^i0B*i+{*-iV~730AR`L-qB8NEBCs9|UXCw4S8x>65e zE25TKRu3@2O8xj}?G)?FK?IG$Dlgwin#SBVT`S2#p1ihjcSpf|76pDDDwt5P#3;w{ zTYbHAU$+o-j!`vqv?l3%M?+RjS|>j@t@{dKYc>aah1+qVK2mL*wcEA$&|#Ay;q}UKkP}PX%dXUE$RuTxp+0=()hseew9Cg&pU%st_R delta 3690 zcmb`{Rag^_0tIkJ4C$^>14l@=3?vm$L_s8#25A_Aqha(PCf%cRqiclHH9$g2N@|GG zsH8*c{vYo9eYxlR&in5>52rf{&>aOxR^sCxR#I?ovOEELH7UHUG#bTuESRE|U3Y%5 zgva3d(uj!DHptqri~uTH#Em$!QUiPzzpq?_CVm8c5jkU;2$sF++} z>$4(0VFbX^&6!E8w+_Rg#NwO45&FG@aUKwgtW6D?aq~NoTp=j*FrpHB42CM6HL_rwSlk z*xBbC)g@|MjBNQeU!It?<@lQc5ay;7w&S<$`Uh@iP9CuFn4q7SKy}WY{AZ&M8&A8Kz z7f)2aBWHJ(RrIX@?#?6Xb?H0tVok4AF*rR{(*X59OS&N>p@jpE83&Edd;JQQ#?)s* z*#lmH8qJa(;UzvZrOWPo%fq|Lr@o zqV5}S2Q)fjK-MCj2pJU53kzVw7k`w)`etRvq`4{WJbGbIi1a zwhU1SeIRXkjniyDFPXJsig-y%f*UjBxu*vlbd{y5OpsBs+kgAKSBgyPQ5^N)?9#F9 z%B0>sCC%@k?#3xu>^p@F;uoq4d|D=&CjP0M@;0t~Z}P31b@y@zNnEL{@)A$YKVwz9_#KNG^q)B~d$#Tco8x z>w&-=2Co=Rf3VE{5pHV4We=6|R-!X8SQ$o_Ml8D*hWx>@lvkNG2_Ny1Bb?d$K*wi= zFOvp)(+vlxJwDS+P|~NjH2*Z{XROTqYy6-j6Y3x^ejpT%_SFO6p>_V7bCWR zt1ZtJ7}2~xi5^**F>`gb6Z$aEro~KYMkJS*)jM%(T9gIV0VRowrK{?#A*u9+emhpa zB|R#z4?0hR<%XfV>z_UL?)tpe)`ear zRDAOJ-8vS8on@bf$5DIX+yLHeJ&G=LnO?>Gfb{|U{s2(IVHf+mi^cg#WT42AD35Ew zgm+YxQ3U&+R3YNZ#r{-xwyY(SI|ejFDFUFz1f{ZrTW!uSuj{WP5l^dU+}zsr_6b4I 
zDbub4!!4n+sXsslwTeX=5uMxf?pv?C?J7n2((34=F`>=vY`!66->By6plN03zm^Ny|$m|=p7 z{l(e@3-{5!G_WYXraqEiHoKaYCD@_ahcp@Cn&VH@;9=te@n4IV+Cz)?K4??QT_p)s zKB!%Nhd?pgTAv0}9vggAvzzKqzu(>Td=w@$GP$S!w+$DE@Nnf#n+I;sS|NT3lEUQm7PI^&qHtE4aUB1Soj`0V1o_0{S% zkkkY(5HaPYM_y4D?6pI-U9{>HE`~CPe3zDHmDwu(5dr!_g392_Ai%mD%yjb428`j^HL;R_2B0Sw zElwoz)#y}uccS5&(v^>b=DqoRPGkdPbsW|dAySG^(~#{yTlr$Q`_KB_IvE`AW)N7v zG{YW)1L7pSLR98~W*hcnN|RyImM|yFUqZC!h1nPx+GgSzO>{(Bd4QYNY)}P%E}M@T z?ZRlz(j;n#Kj$WR@G^7r{E}Do6R>DN3@~C}M$6ZKR&282EixV>P3`n2<8Wq#=X6Z? z9sqHQ{Rr(c=7uu$F!A5WD6g0ER;cIwcHTxR_G@QehdrkR3@qMw(0K*!syAvUjard+ zn?nFW#ZMC_WD}qXC3n)-Wy26IBY7N6@xqLSUyp?*F>z4Tsu}}%(Yf@^IhPlW+mn(! z;M0%6WN8VrCwRo)`cOE-TR=i@XabfLh*%AG^YRIf-*BkIz;?jgft-^AucfD|?G{^> zIbT#auLG}7zYg#qgOX4OTC4($FRgFlLn6}iuER8^qd8Mt#O>?JY;UP}XBP zyZUA1(@YpJU#fJlqBU$Zz-0DjQ*?;mhdE+0#X)zPPJUYoD`W7^2|XWJoj=jL zMLhC;988aE8_E5kMkj>bDUx(EM>IdQ4b-bXEpg5sA|`ZACFJn%#Ar@RT*T{ajx3z;9EfUAbYf4yi`SnqK2^`Ia#xLH> zh+Oy96+`Xe0tunliQ}2YrzutqVG&~L%@_G{{}}3wUh?(G>pgrvZckuAA=J2s=McD? zlV%pGX$l9Ws$j=%5L zzzos9}U{s?n7VgBFnq;{G6`@`>qyvhK=VghdgoG{s>MgyRogUtgefY zt`~FNqz`4mOY4w`tlU?^q~^ZJbu|T+l~EepJjw!+W_STdGcWdsFB58CUEhmm_e|}G zeG|fa`26vc&2E;c&HCM&IOWz4*5un3Qcj!@VO3(0Hi52_J5h5q_?T4l_@`kaE!LdlN zL#M{xM6NeX@`+|)U-1yUQECBc&s$pgH#%W=AyTfkE<95fKsK M-~KmgyBfy-0XgjwGXMYp diff --git a/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin b/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin index 13110a81e528e81658716fe465b67a0ad1f36729..44c77d25216a2aab849662c19ab147b5abd3e01d 100644 GIT binary patch delta 1002 zcmVDesq;T?(}6;GJ0A&$?ljj6iL3u1;1rj(E|Vaq^z0{Yb<2 zRZ!sAVzYl1B?EteB%bahyAOV=K1~c8E1~ECI!?J)a8J%u;9THG77q3h|4jmv>^N2j%Tq{f-vl2<6U)wr)%%$C6n(i|cDRpzu-qM0 zR3y?U(RSHvjnB!CZ%=<&fIBH3bsIJPYuDpXGn9F^^KWwFj`$N2uyJ8Hqp*Eo zPqE?SlW%{kni&~??X?R?Z-vATv{D!V79xIHf5kRFuRXbY3pdJdB?MukdLyRJ;yRvc z@SaXYTfLWC=OfDJm@qJ^*2=KgNuzpdpLcbkp@;K!>KzuC;ERJVLen38-!9EFSW>oF 
zcHl!D$8EyqzFH8zu{N6Y6Lf4@Wy;AX4>C;Qlr(>v(XP4SduzysR)O1wbU>SZHrtR% zSs~B4#Qy)6GfvDU+)|2r5Dcu)^oK65j^#n@Chn%L;b+IOCaZlK1uv3if(18xD2nb~ zn2}q?o8@$xaOeKPY$Im_WRx=GsGjQ-^0WyY#P`ri7T($Z<>ih8-R4n7mDwylJu0$e zefoduiW)R5L6MW3<&}?a$Q+2nOu=r8RM05VZ8uxi`JeD_T3=*ag?~{TU$81v$yC`3 z_&`GoX`d_=Nbne7Qeet$jj%wypfP)Bm5$`dTrla5Pj#&dauc8u6ZNo_4&Yo0yi6HhnVs{%c(n1h+DRk+HvcP}S~ YSVrNE4_G+MjtEdT%j delta 1002 zcmVI*#W~HT2;nz@ zxi~LAKH@~LzYW7&?%Nl-yO@i!YZLSWAmJtQBn$7S9#~0XdePPH1X>(4vR|n`8}a_0n(9a43?gcZDTG7jB$|(QP5Jv>-b=}M2@)fIZ5Y7K$5xO_!ds(G1s}e; z***mlYCZ74Jzn#!`s*2)zhRxDh3YWI2@JejZ=@VFp%Q<>%`dFF9NE#uu~s;*kgF}A zjr{`(on`#d4gAxmsz!+_w!QE+8$@F}$flYVq-HtJ96*NKR$0Ca9QfI}sn~}7hp=|D z%Az(}D_tri(B4Anq2x|Y?Kt)ZF}*I7CHdgIH3>57fui8%?T9!q2%*W&fM9V>Z(YFK zaPCwHIh}vnh5jeqV=Yk=ynZ}5`x;A}+)F7HS!4u;Fy-A=dI;0qvjn%BBW?H+#CTd0 zIu{MjnaL?iSL@%{9L2VrIwp@7vcdO${(fTNVW+#mQsyk-gpC60S?_--3<2GfrMWMn zk>I4!CZ_a`5~~5X|DVX|xm49%YhN*eiwicVRH%RP5{H84w-nKlK!EB#3ZUYdY8fBO< zJkmr7Zb!hp5c;2NyfCfGHp0sBaKMltdu;(={klj^%5GmADz3FR3hq^2Zf^~$l}=;Y z=wg5K9oiAgZhA%6MB*8a#MC^W7Q?lu?j7+FV;sKqK>)j>6Dm?&>(8slzvS%<6itI83D0Ud%{Nyt(B2IE1{CwBEo8x46YUft05k2=2z^F@ zjdY#V32bIav31Y_+`>+n5KW)xwK-md95M7MSpq#;ZkQe8=bLB3loEBW@`eBKQaHA_ YMJrM1aWjl1>Z3|e-ldnP2b1I+G|z_R+yDRo diff --git a/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin b/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin index 0e74c8f67b52270617893dbd0db61ab11e91e60a..c702a4e9b7d40dc5649b26874ffb844890a78506 100644 GIT binary patch delta 1002 zcmVC6}N zEapxwnp>)jr-~G`oxDz)P23KuyUb>@7<^x+wFlZgI=DKb5ZoBe-?^*KPINBxHY#X7 zp;w_2IJQ(Ft{4m%|MHQ?DXS;7IPNC*y^x^#r;0FU%M*WIJ`X+{Z8Ozx&q)yJU_18{Wl=msyXXpnXJLB3Z30&a9r{+2B ztHyP7Ny%L}Q_J*pVnNb$pE4Z0P_Y3$Ozr8%Fl&a6!3e{W#jO-z5aiDjsiF9y+r^GL z6*9{Vvj%^=wcEdc(@e6`iZe4r&Cu1hb4vhH7-oi=n-=(pX~>np3k)z#f$&wqN5pP5 z0y>}|BIXhj0ZSl;_bUoNnw-YbEiE06}ct9mmI+uHW8Qwll`g#oAqhmWBGGgPt4qxG_ug z#UlJBzkh(*xYNIxLz{p&hhC}IdJ&xI5Ju-}qU=OJUJL=Cy2@8`M&ZpC;-H@$_C&K3N-6R+R_;TlVKQPv3Q 
zV~HrDDpb+!#wQGUh#0>`d}o%EDxs7^{(#NOEK@&>AELtPs%KBfH$AmjcXGm}!`?at zN{}O?aZ1h@UEN%yVTqrn-*;71H6keT)z5!VCV7sI8+y{U;b5u!oGYL4%P-#Gx_t7* z4?N(b;2$b0PPmW50-tH}ET3r`i{JiOGv8Em7QrIpgV6JehH;>Md2OYK9RnjiXxiV^ zqG1nCR?yxruNd?aGiPb$HqZ@=0&_FjDqCc^=17l$0KgbHz2#E0S7n{{oGt9;S-rB9X?z5e?{VCIH Y`ypZ$XOpoSr`$q+Y7L>Lk;u- zAg{eP94h^yOrHp?p+RD5Q#Cr@+%Jma4-Q=WxXq;cVY62d^a3E@LY-NWzD%|^v=K`> z(`V>i*wgh`)jz`w11t@QhL%FJYZLSWAlsv+;c~hIJuG@2Uq9SpiJy^<;m-ot1|@8y zzMxRw6SIF7B?EtlEsV2Z8Qrb9b$S$8IHN^`RMNy2Wp65;;z{BU!Rdm5^KwY7z`5TQ zp%WRMYiJ9Y%+a14uubHSY4&RuX}h{Zg~zNaRsyuVDcY)2%2oY4RDPz4O$j#cT0$M; z6Br2!TYRF56B@7(6!slsQ)#=q{RcSzgHbOQaX9@?Eck!EMUr`HdyKIbf3n;LS0Oe> zkXsE_Xm_^ABgbqw`tm;*jkY_~MPS(BG(NdJnQQd^d(cP&FhoG}Lvq5kM8)AOxvaPl zU}Po?9@;7E$%`@bveO1Yqmzx~Sl03M5Tso$|9=OB68za^!QWZ_I88?kk<)vB1KR41 zdmLG4S@wU`-g0(T*y-PJt60D@bBR=33-&ksNim1Y?!^Kn96_BhkPQTDqyRZAj+iv^ zz)gzkgez(~JgVMTA9MI;hV5uau;pp%03~k@M+0m#uvv4%zLrehQ~jDMIx?|v5D9FB zd_TM|)*m&)q=zK|Fk7aZGW^P5p<+$T4>!0*{`-H;DP@XW1m0}N&GzDCF&1PPtDhY{ zqA(8HX7$g^Q&OsKfvvH=ZL+A8;w`p*){tv@Mn1H9yexLJwnO};7j2O8>>5axj!$i^ z{#mHoF7$(XIp;99$E3OY?igpf3^Y^4Hx_nq5sBNual=wGu1*iy8i>YYL z*!6$&N~LclzmY7~3rR-C@8srsw6njX%oj*Pr#Tp{_?2#SD~qBs<|IFr;#nOoYCC9) z5}x$|rFS6{+Pb63!Edj9*<=-%{~&1~jrF+hl!R$Hi~g<&$y$@x&DOh31z528?c@Gk zlpmt95MR+5jq)`DE~<;B0AMZQD(r602H<~z`erK=1o+U<21#yy=nc)q%WbNznbwmf zcjj*=0pZg~+5(R|5eWZe;t&~!WZ0a(TseQFvn{Z^txzS z{s3g>x$yQww*;dU0o!q47!e`q1JJh5SThA>Sf32)IpawZKY%-Z+t}534?X#{f~$~buEcgl(OtRG8<}9iJo9mtGZ+YQkXd>ljYIUCVrhP&&*L9)5Yr?6 YOZ$)3UyW6T8hhMoonG*543p#>G%kARwEzGB diff --git a/test_fixtures/masp_proofs/574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin b/test_fixtures/masp_proofs/47AAF805508239C602AD831876B062D269F657017C578484081762FB65D9D52E.bin similarity index 70% rename from test_fixtures/masp_proofs/574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin rename to test_fixtures/masp_proofs/47AAF805508239C602AD831876B062D269F657017C578484081762FB65D9D52E.bin index 
83e5789827553d51ba6d808265e8cb68071ce88a..7e5410e8031e27696320808e5d8f9c26424e05ae 100644 GIT binary patch delta 2022 zcmV^QGX4F@y@ zgYyC)1!sfF2~u|6pFOXgNs-T8@emgAX(g)UWyRgvRt3C4AaoRAB6ZE4x5kA-X4d17 z+T>aTn@~#@DgwWdNPiS4&v78SzA1qmZn}|svADsmu97t!s)z&!jd+6mcjD9?k1A6j zkLM~Kf&SU^S0#{;mIcuy)mYqmfTh6B)O-}+!&Ku-Am+93^|F8wPNlwTy6&G1+*z)7 zsp#vy8tT~n@qrt011|vp000000000000IC20179kCZ=`&>5~c;M?Q^DMKo7xV%kUZii_%@jYa$IX4<_FfK79xiq$rT?mqFf&d#v&R}m z1Ai^@Y6Hs6T5+K%1EeaCedf)usmK(vScv9nCTUF$b`Ocd&<(kZ9n3I<24Z@i#YxS` zDi9|{$N_u9F54H(3>wN)7Az8HLZ7kP4{fe*Ws;5_sIf8tYZ&ffM&&=*kah~iSh=C= z_CgsapTs|((4&1d?k&>Y&wGwXuM`=S9e)u9#%y9+`RUK7uf#VLpc zcXXqdEf4aq_`NglzZ3Acvv_=gNR*o>UUel{Bc3}i!8M~pSH-4f>fgefW5BMtC<@xh zG|=!{176GRg&6Zv3fzEY2bi@R`MYTnD5MB4U3MohQtNpps)H0SyT>wjY!*dMHlMK- zTe+lMSR1ncrpWS`-H)MuOqU5FaDRr@sh_vNt`!$o)j*=J?kzq5W#-ZfeQNgV2u^bQ z3EEiOR;d{mSx68qRPMRe9y*CM8N&5WSo`{m9%<%-R--;@eLi1}P~7a58HfI^fPsZY zVy1pty;ukD6fR^W;jgsq9msEYv~87Jhk_c$S)l7Xf|4MpQL|m^3nxuI!+$pJwsI6Q zgFCUJOlcsvN=go3xOl#1%b#*o9VUa)tgnQNF%eGR}2 zmT)B|&!h&ph^iD23oynkBx@{+ydm5M^K6mvb%3$aFt6%YS|H>mUCO1o~19gC1c*IvpPU=gg6{+VR8 z!G=JYsd&m;0es%@0A&SE{%D^_McVd0RE5xbYCUcOCRkSV(^sI(!+)*#!jk~1u|H5> z#rGOzdwD&~Ez)ub+oRfb;iZm;A}nz)7vN+P*Le(5jI* zZ3i1aIa-oCqly6(SRX-SQ_ay&v_i|0k0!Q}a+&Vtn&l^vm$(Bs|{L3yd=+Q3x< zBJzH5_K(8gVV?mgh<^ZxLR0x7;TQMRSkEmuz%X<(A^FF?+%-1sGRq` z=M)aewDm%-@_wu2@5X%Ld773B-!{w|X3`Yq3mF&05xwC%2L?~EoL8adg!#7x-Z$f1 z>(kMRDQpQMYU9LT75XAqgysz+$b-T?`TSn;RyX>9J?X)`!d|ECKn3oy0}zOf4iX*f zM=p_$ag)8o%6}T!kps-lX)WNChP28H=LRn$LD-IR16j;hJ?wblM!x>0&oy8FK?tz4 zq{a_w05LbrEsBA_je1-D_YZ1J8^1r;3*tw-^nk;t672n&TvgqE>~HK zkjI}zljXJIv}2u3M7+Oh32g*@0YHyalHPJ`bCKXHtbZVQ0Et15=mEX80kapHayfw2 z(lDGJzCV!<2umCgCr4-5dM>MQg5dVH5EGEiw4g<_#Gh+9gKL#-h>^({ZbJJfWGPZ= zK4+IyDjk&~7Y~3JscspPL3ZY_Ti>*0?t_vHSCwbl%L%Ex>p$cUY)cHwlFJNbAfh_l!fVCw0q~)S1Mk7C z2|Je1bd-paGQ-S|7k^zP*dQQ>+C4#&G9YF?`BGZ77pB+hW~Bb%V_KuJgTCSiRhs}H zBD$Z>l{nGc)RXPMA!etv=K7ofr?V#Q1opg-kADjjAhptc9n~Cz*>3Nuik<6KRZEQ6 z;a*Pg#K}H4#F#ChM=t>Y000000000000IC20179kCZ=`&>5~c;MEy=u_J 
zK<^()J;9`qM#4u&`E9MV9Yz_MHV%_<7C<1RCM6R-<{6F*q!?Zy_h+S`a-7s~liWy$ zkBJ2x&L}05(H1};mDfIV*u+}fNB<9usYKeq1Wt-Q2Deht51`Ho+;UY(lQ9=TL4Yr6 zF1>>J)uzeW7jcxxsGwdk%S4c$>iA*2wZc#_F#<#Y0m_oTR9u7SLsKW}NU^J4(0w`}x@sB|nvC*NPM23V-Gmu+Jc_9)S`u!ZLIC8Z5sHE|E)HCLw0% z(3Cf%xg;aOcjp^C;b-+;krJpzRAmFUfm$80p4PDIuH}~v?tLW6hXJgm4D@Z6wwd+# zd1P^pQ;uplue^%wviR6kL$htT;LUvF@>BaLmNbx^Yad(8XPfvLB-ws%N%hr)i2w>MmD31&fF#^|{q9{jE6ZmcYuA?-N}m|u&ouvPJ_Hzlo#(*!3dMHX&Hz;Al9CbcLC|H%Xc0#xJU~Z2zQ)>=zsoA7j+ay z{DdB@m1=yr&vi7=ubs>Y_BWxWK~yh;ubVyw0RjC(6hQ~y7P8%DUBz^6anz0(gfz() z@>C^Gvp`9CxRc_PYloKMMJ{;-<;)+JpJ;RHEH#}0UNKN;dt-%7CqPSaEw0>yI4m+k zz|mJ8ia@g93wAA%W8UZe2!A$_kSa-yqWGB z>mr#C?w7g;A_$Kyr_KlbyKr&317y`<-kLG1uagaHd$!Q?=)v|OfPcOhd`=XhfEpk* zRCZ*OE}<9MY_Mz7Bzxu-_Y?u7pwwkoc9*^aw28?4FQh&g#nQ9+zW8A(zD13NmS_Ru zxK4rmb?u~>z=pO|h5ED%arpTOfT=<)uy87?-oL^^61db-et|$}BEG8D9L4bbjG`=v zVS9g((35j^aodnPgn#zwDQ1g6YXJLn#uuC%zQ|JNxLAtb)GkwHR z@h^O9IP3=B-7_=KMM)LRx>uG%HR@19TWog@PO`BW4>C=M9aXJZoMD<(ujwRM21^_d zwWX{uvsNt3S$~hO5{lE@#aJuw%xE?$r)bDY1*8*oquY71@#3UMZK5{v9Ie+X%B2s8 zfaxO3s1XN}z8=Z+ss{Kkv@do99d&0+2*Q`Uyk6$ewS;AK7WaoB%Xm#vp9*lT>pS$k zR_|w<91|q0PEGg+?5D)@2s_*uFZ1V|2#P-N64o?U8h<}2is&~(m6qrS7W2wRpD6}G zq7qvYr6WAnfRtN4Kl>buBE9;U`3lj&V;2vdS6`f-aw5@M86@V_jbt zaSstqihZ14WdMC9=BU^;P9bWP_|Y&fV2lTe)TBzEO5E18juRw%7U$jFy?X*QjMCGV z*Qi%}yek!>dlzbj9^2;Q)hS)>X>(d{{(94o;59FS8!pSsmS&)a0$<||c=aAB43n%Q EG}E`!4*&oF diff --git a/test_fixtures/masp_proofs/DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin b/test_fixtures/masp_proofs/85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin similarity index 66% rename from test_fixtures/masp_proofs/DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin rename to test_fixtures/masp_proofs/85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin index 0a8cd508185b176b40d43c6dbba4cf66abad91bc..d4245c57e2ed3b88764874479d31af8ab8ba2edd 100644 GIT binary patch delta 1936 zcmV;B2XFY`N8m@0rXd#_eS8PH%Zg8mK)t#1HD5@Dbnn;vP7+1B9l5aL2T=o)O$N8K 
zegRAqAorrqHapT4R$|-+Z+6h;G#4B?@`OiS4+gqJ`}y3#Hj_XFLw~w9th}2{adr7` zUN5BEF_i%(o!Q)#WWai2w>bpxqb!ecYePxE);k2( zpz|`5@)cho)r)C^pi1%E!Oj%%ZgaAc0k!&(4nZ{z=+%~G`+tz`lW`V6Ak5=G_pC?7 zgnExiurH|;Ddc;->~tm>077ePUr!=kWDZTX~C0`A6aKr*gX{hei)74dm-Ug74T8t762@aK}p zM?V<+H#--MbV3W~i=#6J)T48C-Rd=j{b|%Fv$GfU0wC~TLvhb+C}vh*y|n%i?54+w z70bEfkeW)0nCU3njis~48bt$t7aM(i2fE9OPl`ajx$`w&NQHFo*ZfWrMYOkq8&?#hyK7Tj!h)TZ`f0qB5B^YJmQMhqZie2DPD1C&0&tx!Q8k7hC=K`#79?uSX zR+4aT)fS>^-sZ@wi0GrfPUG27P49uQGV!o&ts=mAXwpE0yN;6H4==0CF(p9G$>A7- z-nJ&aPZ2s`wXOc#T)a|$Sohzq(aIuBk*tU2h@50<#vzltNi@<`<=o)yry_Or5=8m? zSouI%Ha>|fAqH(fg(f}fcecjrGf~@H&C^jI2)z*|Igz{*oAj^*WWI63mj48CDA1h^ z%UsMpJ06qj-=lgra*M$JddUwX7zy4+X}pe1;?M?c-GP8?1US%tJOE*2MKrAnnU!+? zI$nuWQFBYIcSW4_rco$M^8qHg0*^PhbH_d4Tu7Ay(8(MBq=w105MMiQJ9SF)^zr;B z?~PfAN;6E#_Oo^ahMK1?nrQ<$-8Z!Xe0dAX-0WANs=4j%QtAGn4@L|JaAehW&j}c)id7Q#yrYzL6 zD&ok8+1z^*PGJBlxP+B82jd;7klL4~e; zi4jjdec*yt9fYGL$2)jKMJ0aa$^F=9E(j~DQHf+nc`|Q*G#LJeQiwGly->U=(*O=8 zk%3maHL{ZxzhErID-YOA>FzW>0nPE~GyBe6PqZTh8L2?Z>J(AqqI?fbLcy{H|9)$d}FFjaH3AD2yguHf!@sOD#R<}18Ee=>wu*~ z3oXThvivW9dMSPMGshoUia4Xr?9#QcvglF{lfMuMDmvycU0&!Gz)6pQdkmB-NLWf& zwZyPb9`?!i%5|hs13tBKrWSGie!h$@jSI#Uzgj{zxIsN4VfC!*(z}GTG>p%X! 
zOigRN%$N6}FqcQ^UNLZPqe{ttbBYghSdU??QDB6BX|hu%Uon^?LDBv zFWdDmZ&&)%obn^bd1=7sxV%V-W@qMt5-?oJqj*}$Ja7fA&}^WQ&aOag74){?-@#zE zONVNB6`!!tB1E8*1a<0=21Y?pZ7q_rGBhKw0)HY7q=6Lh zsHe?;H~pQYzjb5RVqz?peyba%dZ>J-I4jT03x;^28`_r_MC<|*+Huiy;3_p;!Blg~dUN~xA#2TWKb)ZniyEogs?iAsn% z8xw&l_t~QZ;dlGb0tHUKu!nP|RTwTGtyZ1G7j(yIYvUg^1t&Ef5n2EvHt|`vK1>{t zgO6weWhJ$1IDVUM7qlaosp5uO_GS6ZxQh`k(mH>7?&YDR%0_0wR^|M4WqXMBe-RXa zAO?f2^C*ioDgOX7-rn-%7;{;=_wJe@Ie>E02M^qzv6dXPRmRxTVPX95b#!E37ss!nq*a7EGAG7?%^m(~5Iw`azKa)TOLx0W+tK;oZ$uzRk zVzzbM?#PTi_tiR+uz@u(K$E4=x+oz1kka)f>?sYlg$rmR$Oo<+o36!bFJh44_;&yX z`!oe04`|{432{1eB-s!b%c41DH z;r>aiE&>-*NYu33NCX>q&I^;U6(S%CS(-A!YKfQGONq8xl=A|vo^*{mcd;gvyT_G$ zOmT6O@)cho@eenIJ|Rlq^2g2=U{mRm>N917_@ZL%EHHyiEus;dlW`V6Ade3Wj$EcS z%AQ^n((YM&$md{M(w&i3-$%P2NlQ7G7n9KzKp=>Gva{-BH9#PsdRk~?kohko{!_qN zjC{|E!3$e2A|sPA7ePTzqeb~D4U>7zuc%F7O1}*Lcuf#x?1Zuul3~?^|0uiycB~@Q zg8Gm=X1i2l=y$dw{TJFcL*-FMDPW2;j#e%tv$GfU0w9~as|&S{u;&qw!jucH#43d4 z%faj$6h-tfLS~L!f4H;98bt$tRXAaPSn)CnYT?(nFbGwYfNW;uMyzqwZz~n8H6VDs;lRMs{I3OVHtp`%P>BpLg18+e zehvlGNUwt3)~2@(!EzJxtizJblM^|Bfh*Nw&8ZIyR006#qnlsNp>L>vI0H&?g7Nl* zdo*+QMg;;Ft>A12qZwTMrmUo;<5h=szFq6@dz?|L;@hU0sB}D5G?vn~teq*Oxh!aZ z6xjdnEE%YmI~MK2t%1jT!835Mcu1J4ec>)!P6EFyTq!baIEK7$y5eLZr(~uz8Qli9e)k_zML!TBjY8Z5~JQ-8osxku7N+i!~TzzyR{c|?`vRxPNDbM3Via_QQw)3 zu0vI5-=OoRt7w2=uj4XzXPXC|%Zg{VX`XOij%sKW**Ch#sK-@->qduZfhpr>%(@ou zFy_PahAJ$`LXebdCi}@kc@+wg!d1Z@#?HF$i=R*xcM(PCN7^H~&qzr@P?-qyhkekR z-J~erFrUU0`2+2LgM%aS1UK$W;#~4R`ii-A2Bl`K6CDoMf`UOQArIvxiv{QRGch>R zDma(_q_*C*9LO!5sp%S@;Hf!luEmnjF7L0obyqG%;SkI-REIA91>i$!#SbdwsIqEO zZDDkmDN7(?M>(A~lV_|`@ej6nxic2@>*oy{vHM5^aCb<5j5Ko9r5d{xf{lSVN<+Fv zrzMM6R!P<3&}+rDs!V~b1|$(e@TbHlHBr7h#Z%; zPWIoRrswY#z@WZ}^Q@9n_Kvwi8Uq+JVLbxr2o#m^0WjP^BsbWb@N0t%ZfeY+^M~j0 z_X;@j)4iI1w=W5YA5kt%1L$IvVPnOZ$wd(kkB-7CIvmASz(4~ZFyAX4kOHG5ES5ug zc(vbq$qFUH0y~C|YU!iE3#iwjycb3pWgg`5^*pgTI4?EupY+FlOy7eh+w;d%|9f59 zDU(~T2&36SrMQa1==G|cQks6!Xtec^E?wzn@F@g;p5nG8N`TQ9CP}N?a?qt8CteU+CseWPHvn!Ye>gND|r8^Qj#_Gq%#wdRnA34Mefl;Ar_ 
zCBs^OOPi|VGyCyS;k5MBCfB$5p{@M{7_Dx9mx|)w0X)4tX@JspFFwX}b%5Grz)5@+E35=6XTvwIQw``o&p^mE!iT zk}M?eKq>1t*gN;bcg3|Zpq4wFgCi^8h-0H<-%v>0lJ)w}_aY)-q}SVy)ju(U*YQt6 zc`|^qtFyWV0k$p)JdF)5VY&JF`u)W~xH5vKwAHx{@a8lI$wNXE={XtEaBzyH;IN0(Pz4)*?bQ@j#$M z8;!Ty<$a^n0>@}aYG$oz8BoU4!Xc0=07#gfW%9>94I0y>t8m`rOqKITr>iZ>LRtf) Wi0IuWUVLYe)9Gnu4hRa9tRpmtj;`wf diff --git a/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin b/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin index 76b40e6552fe560c8972619d316a447d37974631..54421f95dcd807e270f4e698bf324bc902ecc86b 100644 GIT binary patch delta 1002 zcmVC0yIvoU>OD^a3D^5mp-@o@M=3gFP?J zv2v>Jd6x+9PA^=hk`)a$IMD2~YZLSWAXB1ya8Mw;&S1ZJK(&l?Ky|llD8kPZ(F&tP zm7K$4MzenwB?Eu9DPMG1%47MIn!-Pz(4fyShms(70ikAF*zRH3(4&J^>BiC#C;`WK zBTm4&oSKTV_5ZR42(GbEv-kHus7`d4^P-ezz_a_xLfW#`ej!FExu0L`+Ic zHJdII)Ff!g6o|nhjE#i9lvYR@Zl5o#0$7jsHY29?#36s|bLj&V8##sfCmf(&#K#*3 zhCO&9yt@w}?TPmu*Q7B>Qfa$05KbF6jSZIJ!@EuJ!o9-{PXQ`Y3+jFOFtG~$v*@fb zwQQ*pL`0bcN$i+q<_ye+`ia6G?7el2^VPFeg&un?go@|JNRKs)$#+nzkCqMCEk?v9 z58mx!Xv}}0{idjBZ)_D$d`%hVB{noU?&TcAI18+b<7JZS+2X>|umYM+cA05e;_wdw z@8wL>G=d**)o?M9rUJz;Ozt#)E}!*=uExs)AG|uWBL_SYN4A844u-`HeOl(my+HC> zs0}RJjkpn!ES2{V&U|aU13eEx#&t*G)oycQDPVs@_z;s0)?7-%21v?2b3T=d43!19 zz#b;>J2v%9CGLg{g>Sw?lY{f3$+tCKj!EmZIC7RBOd9`{G1y5)Q*-s?w9v8FmJW{r zX;%JYR)StWq%?$QJoWpGtpUIETkazYd7TEH1QWtarcCV4LYEtTiinZ2!IbC6^1VAV ztMGqQx7|nBehX8VS@Ra}+dfj5>L&-NpRy3a>y7=IGS(KClgG86CVAt;v-#@&rei1D zI@&O@x1z7-;`tY!wX3wYbUF;CwO>;{)k)$Td&EteLqKr)TQN^AbAHzRbW^eO`#@;wHfK2;L%3k6;|W8h3qBm z&WMD`)?D#7rL7W=*%I`aO+f6=`l5qz!s9}B8QIzCVuF&YHsq2w^iH%!PR4cmbBji? 
zM(=c~c#u=5oRO#q(GafuVDSVKg{hy;Qt@N+qr=>%=W$xQk67GGrbh)y{rZuUuz>&& YdTThf7=`82h!}GyroTO$29x9*Gy2x|UhyqtYTi=dw+BQxB?VQ7$x z9ZUYhn*Cp^XUR$*@Y4vGuEP;Ov$N~AhfBmVRbcO{ddj3I*tn$Q{wC=>zIa4jtE`GF zoFdB%M50@RRh{@IV}R+F5xJ_LaA9os@AihQfkpU+;@E$$Uf=gfdbybxNhD;AOgkMs znc^aCyI+Q!Lhoe&G$L%T!abg|?dANS;qm99h0d1pdg~KYH?ztmydcGhye|7jfTcOY z@GK9+@pg8Qr09lZmC`dED88|xI7aUz#RS#1T>5%<)RfbRnLH=KdohIlsd0(jH>On2 z_K3OG`W=6>z7K@#q4p<4p!LLCf`H)}q0lj+TxrgbR9~95D0V#P+X&Gu{W!?)8)AIsFug zB~!x!+dAC3pmOFIzovEi`KX@(j7_}Z1WOsGb2+n=*3_2nN(54=2a*L^lc{#oV5RQE zhD@B(j$66zhC+o+OgEhr<$Rpyh^|Oa%$yL9NX#AHxj$*R1go7cS80?qNo=e(c?EH$ zu-SiWWo$GLy5eLN4nu?NTjL>80{BaWw_SX|w4Gtwmp~k%0(M zPTp#v>ZMlP__E{3q|x?WvvWhX>H8C2_gT+|hxVWa9)X5WG)T&4+F0`h;Sz^+?V!C2 z9t}9?hoswbj?P1GF?>*#f9O;@A}jG z^Vqj;;cW4lp8h$A)A6-Cu=Y)zYXL>yJ_J5*n5wld5HZ)Lcdi z?*O-OT{)$fOO7qo$oN~ZpaBSWii9$Ig@t7_7yfC8#=pJAU~D=Qw8&}ilveVO>K6sE Y7?8IeV9LpuF(4U1Hi*_v)&IrNT$QiT z>9HS63`^PAZ$lt`1ZC_S?^l|Y;Yb`|diMU_M!XfVYU^e8jJge135+Ztu4XJa zE@Syx@&1ULDt04E=^WVGYQPLfgh6=H*#`PqAk-@l*wgrzD^MFqCm*2m{F2MIPN#Y* z36rIWp`kk%U<0#k5Vr#$l)8Y~H1TJaR1N7FW=r{QM@Bul}{t-ge1Xe241m=U^y5+DW!y3#3|O+g`F7mNIBdOXPo z7&>F2FS;Ms`TW>i(=;Fsos--`9ZazalhjWo-Ji;~sJOy7*TfD+h<{3g1lMRFGhHWE zcN|hgtf)YenzvIZ*e6X^JsL;M3L#RtB>@t)XCT3t&PwBC#sgh3h_q-=SMlWF3bhv7 zZ<++wC2A@A8XmI=6;%Tu^1@;J4r85Z9~DzfQoOM99r4hER}$lPc0BYL$Y?|6lYSUL zAgn{ekf|W*)=M?Rf8ho1tBs0A1KP{w8Cf*RU=4p9yOZ7+L18xMv?AHN%^`z(8;5BdWFM zl?$C*3A^km_q3G3|579pS~GnY=0x*6-~{Ha58$(DlH@kdAD%I)Y4#@K8J%*J z8zWP84jk~Mv!fg~1Ak34(|qd-C0w1TU{cgpUbkC*r&g;rn?Yq!IIAtEbx*Z<`oc+G z%g9Y_zc<^xUQ3HZQK>P{`ZDa3A(LlSXgPO)o}=xOJ`;$!wsRxkxAckwBas*_>6}v) z1FHl?hoH&rSE+t5yTSKy@amLY(YbTds0h1vfSSF7AlMozeSfzD@^$-9e3p0LH3%zN zo5aw|QHFLHkWZURw+qBWv!=Jl_edyJPp{JDG)EiN>zc=6T z=E`pLFvD(j{9PDp!##aN%<-M~Q=bIm0k9{QVtHVr`>tP7H?&+be6Xe35VB-5Jxt?k zKP_E3t6f~}}C)@sF1l(oNJ7$JD^iYv$f0Sq~`sCw2xu-oMlf1bwfB7puA z;XmR7<)m(Wa6CwYqf6=MgPwoMBeL4%7!U0?CW2s4R~(vfBp`LKXh~H(8z6mE$9l9JNM1SrZIhJ@4(HsNj;Dt~7WFj*) zASKfO$w|R|sBf7#z{8rFJmKs%1BpNds$BLI54!ITlyKPM*B|OyR?QK2&j+*5$jWt$ 
zzsSj(q5D(O>vmklmC-5&ICHCBoyJD=;JQ$f2;nEoC$K1u5of1On>EH2PCEmZEekS# zt^mJStba4-kX-!6|I^<{0GHIT5*!T1?@qm!`t{(ygCsC&WtLU^-Ug$g>;lv#c|+%J ztbn$EmjMB~h55uMOKfHTyy(c0w>sAm4@c!#N zu@;SQHl4p!n&f3X)m-FSDBX43PvH#S$DsEIh9PdMB6iP0x^JHTpy=iuTJ0Oj`&CTu9Y z6dP|V)yJvn3+@H40m}pwBPL!|7okIyi5r~TBF(+glc0QSsjfyy~4t7PKaS>H9@FDl7u9Ml>g97sKoZI8EMk{-ni?p|45Q z%L~ak9PxGDYsT;2jgO>WCf}yr1KJ$q2JOvGsXHigN`Bk^jw+(&4-?)3?#36BCA-+j z?BS6_M-8S{P}c)}ij}`6y_^GQ=Y9#J3gneIQGPHHu}M5WpVU6I2^+M8Yk!D@>t@az z$P}VTmaR7kt9dY;EY$Y%Ey6zOf95KD!PTJs5M9>2o|p~ESqS|*oPHh(lUv%86ansy@2S0;|_Wnp%pP- z;23Rv@{gOZ4J$br{%P|L(KjHH7xAn-J<2Ujl7S-sT?ge@-YGO@owW+lwF?uCmQX1m zEw%-9#0u)R84G=DrlqD3P8wk0tBtc!d~=rxe{A^ZEg(5FtowC33O$g6_koh8Wb|>* zst~h*q_%TkVhOg7hzlT0x~gEG95NpDo|i##E)j0Xzug8dMd1TCy{WyaE3Hx>C0#2> zOJXnuCb;XMIAhrRH6B4^8JVBOKfi52|A?j@AhphH6CJAFJ;P^I+1KIf7?!@U>1XD< z3Eul4FOJNcayqkY5Vr#$?a&qzkUY2;$eMLE$*et!&Jl>z$%0#2%cA0)0ML9ylVJrS zf6+H-i&2HP2#76wYNdJuF{c$`QYAg8A*AO`BGU^YM<5xx7WTX3h^>H^fg#p%L?Yfp7Dak1bMB<2fSd@8c>+Wrf9a{v zV3M=7KV`g#kbrCBdftaP5ila$(e?8vB?>sb1|ZH!M8Luz1HWZuaP;}2ME6VaWL0ea z@)ul)!I140H7m0T6;%TuMAWJg<4zR~caN1{95PuBRxf;9{J$&;!a?>K#}hV+lYSUL zAS+1ylFH9v#N~E81^Ns!IRz?L1Ak}@Tm@b-UpfElC$e=`}&C#K|m(D5d@R5LZG z+V(?3+chfnHT*@gGOp@M@N${vsGwl{8V^5XM!+OEP|S>p@9MGbu{&ms#P&iSBiC0v zN&YC>JDb~&KK3T!8J%*J z8zWP84jk~Mv!fg~1AiUIIyPvsUGoNTvNmz4RLaF|*H@-HYUuA3frj8AlK_rhgLAhg za&0vC_;Kd2I;Yh2O-0!UE`E{|T$qL{BKnNngU{h={~SUe=x~#d4?2+P87#l~1I}@# za)Zz8L`u*97zOFPKmNbUH}f2U%E9qFBv@ws_rn>nrh}VnynjRwnOk4GmeTL}Vja4~ ze=_jBmfDX*+&d&bi5VK*Z9po{nGx#9q%w(FiLqKn8te{@mP(rVWpJN`^9nkF{N3W} z7!{-n?mX^80tpue4SldA6HP8)sIPK-?12ny-EU8mSy3=s5^z_Q<4acl>ZfFM7`E!f zwjgDMWg~|*`+qa%!3aZQ7L$qj; z zqcjn`yG>(n>)rS8{R>5MnT&U)lOj>Whtw!XX1tX@Tz`o&UG94sE?R{?K=b}bI(IFJ z?a?L{g9JpAtY$blHb+As@!Ko_o={%@lUn*Er5HoEO=5k3RG3BX80kX8;NE}JpZHB3 z7PDy)VZgd0|9226SRnM31+3)sE>LcoVRY3D4whPM2T8{6AUN;T@!>#fj-MQ<{dfx) zx1Tzs$baxS{?G|Z&-Yuudxq5)_>9g=gD$4cXTm9(7}keH0X04`U;&H9c%k!PN1kW5 zLBGgHbb+9Y6RjnJgp((*tzcKMm1L5*AB9(;?4-Qjfn5o6^V`j%PW 
zc)FkbNTV~_z*5wD*|R(Hx@Lptub;C4`;9R~w||4?yBVevIh}3X#IdvbDKT3JkZ`SI zwMf{&ebCr)L_9imlxR_p*S}`l!{V@K_*q}rpDkXo%F|=;L=4YImxZ1o;c_Qc-4a^q zsKK1?PqL%L*#sJzsC4vcp}PMN7_gA+5|S5}m^t;43=O}Z^TG|UlMB|x3JrM{uMLL3 z=zkN4Y)8l~T@8*E-b>FE^mG%gdF~{at#6ZHO*jlzfEg;aFuI||KwOv*G@~ehP#<$s zcj>yK;4aFy&r%oT#kip^INXys7kF(vw6N{6+Npe@_?1EwO1r-+ibI_@T`SfilMa!} zk$~vaMw&zg-L9sLghWEEl5e`mnkbMn1b@;cj;chx;d-6O&0k5HVXZqewyV*3u z8c68SF|Td=(9rh6Q11wZzlA?Q+hD4TfG5rgSa1*tvpu zeARcn5bb2(f|c@zb2iNu0WJSlD}NUzHbg3e7CYt(Ry%=WK(adL9^O|`jlhhgR!qd} z*Yb_&HGDR|6b#xdhNT;g=WD@$Oj4=hjT>*Iw#2ck!}N2P&%1YtRgJO)ZGm681QVAz zUan5=%ulqnUOFA{CD)w>3ner&az@(hH+d?MD~#Y=l}L%liPx04SW2#cfPWHLTp8Q| z(sJ6oylTM03V)=&>u3U#O$N8K zegWqVe;3-Ov5A!)G!w=AO7z2y$*<8Xr^GyAosIx*6D~&BUmy%eIDZ=lCq41Uh`Lm* zVkUS=`s|HX6b*S5cT+{SIzS+L$H)Ku#hgAYs~zk1pz`|!cMB_s{^OFpV^qZ)vB^sy zLLZ_pndCt`qt$`j3jbT|1AmFlwpN+o9VAozf2C1VY#>ID00|cUUYk=23jRWI(b?9ZA6NZTBjivs zi1FMntlCQ7u?`?xH|y|cc@tgE*CxrwP`WD7>d$L+okx~+a!i|lQ0M6&AOaVOXNxz( zAms)~udEDt-m(MG}-_r$IJK4pLtdi z=quwoHew(Yu}a%=b-Thtd3`4USSfyPiBAuW;;{K@5yVTfaJd5@c=a)y+H%Yr+B^@( z$`5*UdCsf)U%`AlinCXVbQL; zDel>@FR!4(t=q34ABio|d?g1`qLJO~^)VWSQ>ao{LwJL;pB0^Lk2LjC(%`+S$dGbV zV(Gr)mlx8|FaolVuVkOOHacARjc8c#^4oLNJv2LW#~Fwp(VzygB_c0(-2_ z2M3~K<*z!OW)zSa?@x;Ly2W*`yzs%W@ar5SC;fajFBD2v!zmSwlRtc zENCeN_9o&PopO{LBU5$`9Pp*HqZ~E^f7lMRt0D6^r$oY=23H`8F;OIOqYAyeYQVw@ zf26+aXr+u)@_2<(u+z`*!r8;^`tuJCf4A0wr$#Po`1QC|w@V7{3deS&FtD|ivXd958p2dj z)K7)u8F--Ac?8#C@v-J(*JzJYP*TH)!S5Xd`w%t0U24rg?<^C%o&3^-S*=ytGZ%!YHFvwsmCf5T`VL`C8| zFQo4|$XFsgx@xZT!n0^L2{DxHvwYK<>`s~ub`$c2`W3IjcVX`msq#hwv6|sr>1yF= zeWdGB6j3p>u;L`awTAF4T`}QcZ%6?O24c@MMltqY?N4fo6q~aGXskOsAs;K;(S$w5 z;(Er>nI~7t0+D-m1nK)Mf6gu+E*MFwNbez-dh!-I_KGWf*r@p(pymO?J&AzI5!Q$# z0FeYW{8bqj-Iw^R)>+0Nh zk>H7czZbKdroFJoAN{&L;YEn7AKe)*v1&-WpLg@PVra4aI&888yPorsknT=2cOrJG z33MDbtWGi30a(|tf6YV|1F3HJz)G;xK5HY2I$sRXg@+-jb!CsV!Zx$7|3)50L~yOe z*@_9zOB$`CBMXyf&+LMtfj`jN&rW%8y{TKp5iObs4r3^e7(d7|0V|97xRYloG45XH z@DNE|!DV)?EM0e(*A>~&>LQHGSQ5Gw;fs=|S2z<|&W%>of8;(Zz}Geto^C(AZ=V`1 
z3N0b>FfV{%wt3qmE+jYm+X;*Df@Lb~nOEMg289;28e5le4+*7Q5w_8`V5J8?2UQT2^$vdN=w~iV_0`-RvQCnUKR%l zDqP>?2`|oAe>Z!L>PZ7VRSfue(D|MhGOmau^CUjrE&kO)grg2GC1!3qkcW#;@hkuu zt7g$1f21VzAA*~T7OP!$z@x#U-N+cjh}?TxZ~Cw;gIkY?;LFXNMvMS|gY9?o{%~go9;kEL-|EJ>qV%QSf92g_ixdIXm(@NJcMitK4|f@I z6yH_eRM(58ruHm#DuYr)G56Ww4JSgGUA|rEfsv)bEyl+K6$_t8OIuKt@fbX|b8ST= zh_w+gEBWxcu)Vl}tIIQoAjDhmR|s5xRud=4c_Cf?dMeZcWhQO_2U1r#g?BvQW#qBQ zIV=^fe}~%a;uCvEZc-0000000000000010000000002lTIZkpX6=5 delta 2313 zcmV+k3HJ8&O7%*RrXjJO!gY2Vet0Z8Bv?G6xO{J0c*!%qmHe1R^%B&jF-ZcGO$N8K zegWqVf4-Bk&IOjQ2UkRF(~vC{(mD1qyssGZcTOK?h zxl;#U`GxW?_;Kz|@xBZaLz7*|(8^D|_7hese`$IDIUvSYEcL|;tlHo_8fn=dGaROx z)d@`-?2tnb!%BDatu`QQAT+Sr{BLNePi8ff^&ms-jkvJABhQmCmD(8trk@!geGMhY z);G^ea068CB{o`eFc!4fA*C0BImHp=6caciAjH9Kjl0tpyZ_Ti&(RaZ9E`ksCch65 z3D02uPRJjzcLK9)5Vr$=m3WLCqr>&H5>*VGVRlfZmf=0Jv{Timvm}=G#?}ZLARHyW zu2?n4XXc$uY%mNA?9WKG%S0L~hweYy(5pHMULeKHeG9;Wq`dF?^1L%|)Rv&e^DV0k z+fD<}vHa%sVqhR7$qsN4ANfm34Vrqg1J7;{=UCd=H2-~qnS#52d<&ONAWcl^8zL(S z9LaG112IA5K53OSnL1!@yH2ZE09~*d6d-XuG!`&EF0;`__<*qaJkr#B6@Y^+H%fl0 zQ$ONm+94p)zs7z4PS#7uZ|@W#-pf6UTw5i8X0|h0!Y!Y)oHYd?tw2;pSQ{piU5yb5 zF?#1m{C=RoL8KeS#x?Djj z|IG>yBE@wR*L0crC+k6ojK1^GI|=9_jpU2_{9JJe)}vB-_;8ezWfY=TdD%+9=@l!v zC$1YK<_|pTn?W$N9xNxNCeK9icfX2G@}Xg6j7MlfVB^$Jkjn@s5uGvRKS4^ifsOtd z;p|EB_9o&PopO{LBU5$`9Pp*HqZ~E^f3cpzb#@zmcq}?3SUjS*d~aKL$uqu{{Fp`c z64a$JNu%J>X&b$*6|9@>(})!nda;kJ{8w62oGPmL#9uW*OW_rdzBW1=#REUwu2}-{ zy{nj26nw4t(-Si?G^KR!oNZu@E&iNtQ!>oU(WKRBq+b3YwTAt&_VS0vfh&W(e`XZS z^+FFG<3xt2w9Ef;IZI~k0I9s-QN+V{R7sIcW8G8p_kq| zAtewT6r1D${4u^*GABk1X}Rj)v@2X4C)WnEFTPF&2-vLwLe^ViK|ree&t)_VJsGh? 
z0BiVOb3(hC{dqW3NqpiulR*%V~_)f7Ju3V>r?+ z4gKYGBQ4DoPL$&N1jN+pT#b^fHZ1_x>}l-@wt=&KS*)LqZwJttenpPoD|ry*sZUunkb|I3 zl%?vU8yS76mbz&MwS#%*Xf-<8wN?BM;}SAEh|MJ|u5Mc4kR)^ie;tSopjiWV^dO<9 zub9^k)?FY5o$SXIKDoX2?)6L)Nc1maLPg#q)kLoAp>jyuxREW!dZ&npp1A=qh`$J( z2U$RKrlXPix<-b=VxHuXiGf&=7J}?{B;`xNvfZDv1sH{E7Coe@I3@@l+$NPzD2*)} z_brifqibE>0J!_9f3(df%OXY)*fmX~#siimuOui21KEAED(C0jJc^0J_3>pF2*%;jFTP-8@%DdB+W&f2rcfyk8cE1>vg5DxGN zK9+mUZCE$$e{C7y>+4X~zZO!%9Q$!e^AL83Tl8V|Ho(kv|JM~M^J1RNZl#ILB=~Or zw-2Zf#Z8;hHuaZab%#Yyy!zw?H|O*^8ogHy0#t3SE^|51+YI1#9chciDBEpl&Q+oc za%CK&2Oq2b3yE5X+b$S@vP7ffRdPwp3woyfUcrFy~IBR$q5M5XF>1OV|8wzEE@v jsM^Mm#-|1X0000000000000010000000002lTIZk0cBOE diff --git a/test_fixtures/masp_proofs/B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin b/test_fixtures/masp_proofs/DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin similarity index 67% rename from test_fixtures/masp_proofs/B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin rename to test_fixtures/masp_proofs/DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin index 6519f4fbc39ff576434e536792ebe0b52d446415..a8a8691cd4e3d5a3d412e6ac82e1b884fdb149c4 100644 GIT binary patch delta 3970 zcmV-|4}I{gk^!ue0gyT&v*8JjBcxGhbdqpwSaGf6a-mE*M){jiQ-)!{ZAP^e1C!we zx3dxfZ4-Y$t>1r#CuU`jOVkuqd{Mw~A|aXbhZ+U?7Bl9xgbhj{I*(u3|G={L5y+lS zxr~YBB_D_EXL0`TJ|4nGswXcQAhyDi*iY{)y?SRuE<9I8R}h@uma!F%7Rcij;h-Ny zAs`^37)qABdaRq@^6b1tK3Vgu$3N7FpE_a$LMnfwck)Ic->JHXpt95O&Q?_6+}Fa{ zWby9(cl+f=29m{$lX=@o;=^205Fop; z(xcb8)RKu)@a-@z@d(6GT*LtHTd9TQa|g!ID^4JU;&k64__QkTKKtjU#jN@+?50bm z7t6DK6~F@^lKv%d(hj|pC&bE<2hY=tfQ4E^G>@anlkEc!pl!@?lU*4ie{8(R1sZBv zu?KYgIEz}?-VmUTiePR8V{k!eSRSHGTn}b^Kp&2%T!G0000000006lc6Oflkg%d zvz!{q5Fpls9vfmpF+wY=6fphb_a2Q?&N1VHUvi1PRKbLhxj~bKD?lK>04UEK-Pe+I9WEEreoMyZ|<=5M9Hbw6B%HK~5`` zG~r93`i zp`@93=jQUhy-c6tb1657B_J9IEP_t$8p>i{EjQ|ga5B@~%i-Mq?U_bhFuN?t8(i{# zeLUh_l-mTn*b2_mvmP^*0)O}JEo_&MS!|zjINoS>{8*(YL`01@p(Gv! 
zAm+%PnR$NNd!ADHwZFGS1@$&{QQbOlKM_gvd}h7zT6;~z)p`>5aI-Qtl>&e92z-$B ziaHqBb~N1U^bLi81=7TUpA99wnFG|#QY<@qSk}dv*ov0 zDUurFprT=GN$7PUvCZP$r&O~>Ih6u`Bc{i#D-xO%p|D)Q2XUKEC*+b&KG0x6YAIm9 z)Os^BhNM%SjTJH(;D|TTfhvv#_k#O-!lR6D>mKJ^PlhjPl;e&Jp@gOLg%_5A(}H*}p<_>+fEjSRbHaFCevLRQ=0-!7HXLN9kR zX)#y!Mjz-Yvj7f7)8``J9If8`{Ullu8gtQ45&)4AAb+#r363MAQD=0LaBWy|t>bc` zOgcvSn^04RVZd!hwG_1$)l;4R%r&N=YD9ngWiBxBoYK|hTsi4K%xNOs={yp#Ug!L& zo94q<`S7S@o#Bh_qXjpsWfNpwS(Gu2^@}#eNapYx9vu9D1oOikZ04!pzX=_C+->r4 z2WQCwqkjevB9U}({4o-Ta5n;ttcOe1{z2_I4S)bzQS#GpjUhWz)&HL`(&sl?2;{ye zd5O!C{<`=F8b&=jb}8u~1=u%CKXLCWupi;P{G&dSbx_@`stfz^j|#c!JX4I$hcRk% zm4Wn?m2s+~f0kA3(M~5B>@)|V4!oUtG4yTwH*usaSwv}8+PuKf*vnc^8Aof+4RVZ% z2c;gtlaYj{g1W9NOV%_%i4oxGV>fqgk?(-gETypm5sRGVF)tmomj?GocSMY_=ww#L ze1D$(h4^DYRNL& z-CDyG$td{?lhIlh+lny45Kv4?#Qev5y7e35gBKjAW}+s5i$l-(@LObUs+!rI2L=7g@1*M$IJ+_jRHt3P#8EZ#Qq}ec zvAxSTPR$}nhZDO%52fGMAEhy5g9LH{g@Fwzxl2wVe=c>))4_@)zGmV;ZAEOs>je9ntuT( zir6AVCvS9a^6NI(|I^fDePQPE?*Tk>f}${r(jn@`W$*zx#fwAsy5nX58!^;MNWZ1? zEQ+ZT_Yyi#F6gHF-{LxdV8kY5%m-gPEGIslD#XSL&d`x&9FlRmuCs`CP+GSV8t?*gnirUH&cz>Nr2Y=C)5=8)rES2?yX)2F|iw=|=6k;O)6u-BLMG#3k zdU14-cPPjJmT`|-5itEPA|AkJlU=2T4OC#7+C)3xA~#`;Ea!4Vk17LnKSt!>T#0s5 zkZU)%j2K$W!3mVXE^-3Qcx3)R1U>4W!BDGw5n(fUuBUa`(v1?-k2t#csej)q>Br9_ zeE>f^T_>YYB$#6AV6@@K@bkEBu&$~KYL%HKK;(xA#h zTc_59#TEr8cOko>ps*kiPL>hx64p)-B;iDFuE6HmDqJ+O-p75o?y27xpQs)MajH0x z`i0L2bVO@wXtL%qI(EJ zQNj=F_fIqCcr|g8hAc7M25?k%3|Z2duOd-IL%!5s&Re!4{8U3o8h=Ze4reo>r3Fqk zga!*jUUqn>LNJ6qMuoRUg*%j=LdQurqShP!u(kRc)bb%M>rJG#Fra zcN47jU4ar_SAP|^-r>Km^RiVU*_AzWe6I3^77gjAkP5 z7;}KvDVp2#x+0lXmiO0JjuD2&k*SytZmXfKvuyT+WI-=eQn;Ql+Q=b_U<^d~aAl=MRf@=rsJ%?J@5p`tfCUwrR zB^2Zk2}?3LAx~B*`x^+qtZ#r)D8MG_ANG>qI%)Fw2D=2^z|OIEDN3N#`o-LR^dlBs z*ZzWQICpj~{R;)qo)?%fcK>`0*1n4^7Yec)gMVW_T1U@z03Kr?Y()s%@{V@3@Eo~n zk98UqzeW){Lot??p_QzD2bh*)QYS#y4!L2>%NQaSu90&WOT4!p`vj=lgmcW`vxU@F z8wI5zo>~0SnfLxcpwH}IwDdem-`O&Aqg^4Ht1c^VEIPkwKBuN!Zoj6Lw;UK zNqWWTB^&*X5v`GTp-Gi+Ew%+t60W+e0Q!Yj6%&59(}fp#b`*jr`bRB;DKqc`7v#pY 
z7>$$8f;n$;6)cM;kcajF>k3n6TkuUbnSbp0n^QGWmj9?({?qUHIWeZv79g;#quuVr zM4ybGf#YMKNa#@POC1c>+LMs-ROASo(ObuCyY4ukfMk7(xaMpd-lBr=-Bw=k;h{}P cLmEQJMW|o|0{{R300000000000h7&0U*wjK4*&oF delta 3970 zcmV-|4}I{gk^!ue0gyT&<8hkG%YN$K2#mQTc6e=E8`wE@V5^otbWauYN9m^xQ>S=`?-MPo?0H;PE@CC%86Tmqbe4-iS z0f42tbXS~(_CmkA!0-s-NU)?yAWARB#1shbTy#jv+Tzk~EC1d8E&8+fWOz6L^-=i+ zNg&Z%swzBz-DGux>K@3SkHc(lP;ON z>G^}2B8_Ci6zA~KXlNZ-AYS!chGYhjybM&)vo+Y;zAZt&c}f!qXM;U0#tr}_A|PJ} z!ofPDjlS7ZHJ(w~g*5S+oB$^KydmyjXcEG^i$WlT;&k64__QkTKKtjU#jN@+?50bm z7t6DK6~F@^)b^j%MQGXu@Py%|`jkg15mq3EMKa*}VZ5q>u#+E6lU*4ie-fuN`oD4_ zZ#!^z;#-06s3i}5YDSYr+?tfs15oWoaUffy@tJ~P|Pg!3%U;$5PG!=G7W$^r7DSUqGdd2c0000000006lc6Oflkg%d zvz!{q5Fj`QQ@A6Zs3M;&t<%|6J1?T}a~9sNkH;cjnuTSQbmfzUD?lI_Rc4GZh`|xx zw4hSA+bXU=?r+I4tWfC=7jZl0w(;S~Yqu!E{5AMvuYEw0+ov<~2N7 zW(5$qqFHOOjl5+)fl;3AbeVB6fKHFsI|kU4IF)VD=H6pA0>D{|Y8X}vu6E^3(YLeq zMCvb_`fIc2ETjT|*NH)6ppXrr7W;ZQG$q8)c`cwMLdo*9^zHo)BfTz15REV&^|;yZ zbR+vgNZxz3Xs=X@dF-^GX!#gBk`j$pQ#9E~bFvXsv>{`Zf!COWfW!BDZz!o|j}*OGdpy=mb}^3NGuqijd2bJd9l zv9NL83fo4%L3rKA9JuA@iKQJF;UcJd2^A*!+IihufNb+=$B`@-tAYS2R|RfzkzDeB zeLUh_l-mTn*b2_mvmP^*0)P3s6H^g6dPzzmcs)V#PZFL~uMxmnQ}noEmr~J+0o8=T z0K%V)?=8GaZ;4Ydt6@s_)U>VPrv^xUbgVNvH}8)PPU5aI-Qtl>&d~zr9oU zp)s2B0h`(60JBTCaV1V3Ld!pSS-&R&8DaKkH7YJ1R1 zEw(Y^prT=GN$7PUvCZP$r&O~>Ih6u`4LW&4O$sDHk~Q#8dCJLJIRi=MP$|T*sqg9rW{ytOWg2sIQi1l$cTk* z(51PHCTSM6y`*MjIhIGaa=g8ny;%B=fN;{01_MBK+BM)vC0mr;{z2v+1=J^F1g_LGIayI!uZ zN)2%mSd-|Bc00swTfV$63fQ*9xg>UYZCo4J zIdx#GmQjl*>IJ#qXF{vkNW`%W80k5E*>MGsr7&@musiD+Qv!4(21s%@fhY%02~FMJ za_U(=mWZ`=2zQUZo}<9NtgvVMYHe9l1Vp6*SJi1DV2^JmF>)0p+7D^tmdFba3)w_s zML`5HM1T1KCRt1t>I}-$7s~8pcnG8Wor9f-eQ`>=0jp@lgn;}`#|Rer2T=|p5^>&d zsG5wkW?xCqfNbhl;fSxxUUmZ6Ts${ozG43FrcOX)ak78kD zKxT-HvfljNCBTY7IynD26ePk;=utnqF-;$2e1Gi|wk|7%PW;nhTpCkm``1`GsZu3r zz}Y6K{RK=!d1B8DxKlE$Nfo(3Bwcxa5knQosr|R93bUZ>u(o|Z4QJ6=43+U3^ge|y zJ|KY4rxBQs;7f7FMEml&>D%uLwow2#`_AA=xj>D|pdmno115y4u}kg++(~7U9{O!$ zynjP$^L(BO9Eas#?Molmjd84N(mh7oQ;pO1KRDv`bgK3BAhPDA%76+o9RNf4Qccex 
z9ch_(kN)X1&e@~$LYegcRNW^)7+Bnj#5}JEZ;=hOu81bkAH)vnsRp?|9zI=G+oaw> zEnf5%f}QvBFwmsbX2sfUgK|xFjg71eZ+{Vi*7^iK=-O9^20Yi-V|v*J7z(7y+M++q zGcTwA%i&t)(WESCTrF;@I+c%|ARKS+!UvU2WqXDS{CRx;s1c01h*{H+pTeVtRA{3V zizks4(9s*q89hcZ9v^BA&AP0-`pxcbXu*oKD4-O*LoDyH`1{{9S`F?6Zlv;vhJTZ+ z_UX+=wl7ZqiuKzcHY#83vu(yC8%@g3awyil8ULX97_SQc##y&hF;`rYF7w$;C$m;v znnD)fQ5)q9=GJY&$lw|X!YcsE4xP@1c@aGO&&;JOK26q3(;-ICZf6{xg%A$(DcOh= z-g8}!E!rl)F8j(c!55pQ(D%ze9Df1?%bPQT#_7M~e!O%f=G}Ge?&7#(GzDw9dilnk zyhK2bc$#DhtgCii%?53<@qf;Sm@V6U zrVG*EtDE^Cqx5L>crU=$uDu_~cJ=$atlV5#gb?4?V62E5CGC0x%n>9v5ptoUIJl!- zD5SJf?~6ak88_5Lt|*c56KhfR;;hT`;L;OlH2vVuzHKhb24P%SwDgf`j-*8y+tl94 zEO|18YO#dCGdOr_M$uN@26PZ9e}yHlJy3`CkaXH)zE84Sof z*4mV5Ke+~wVhZy`nSTw((v)n7IdMpG3DbZhgI_`jFbUCNEc^%K&wrT=?wO0)UW2pg zkW1iRkFhAcSa2*%zLqZY#fP8?h`WF|hj*uzY_ak+2_gk{FrJ?AhI(&0DHR9l!?FFS zS$0KfYV$7Zp24N`+}tIXrTkY(KCI`(12ek8@os}{b9b%JEq@W^#Y6uW51PC#`v>Ph zE=R(4jsOlz7x!x?agd1T=@n`({!Hlz$aWHWeGKnQvkrEW%%TaeYmp$4#`H3F1m`5X zc1R@P7hu-=0};_R1c{bG8gl#?3y-33e^{$5H1UFEj~QiIjWs!* z9Nm(^mxMG!cz?uJ#uv=~Eu+oK4UrZbRn}3hnZp`31Sa4XKErfn%j_i7T`$BfT5~xX zuDh$5Q?FzWqk+*Ut=RD2TWbnjSJQ??)>{UNebLElG3>w0L0P}sqF+7SF{`$qj?sk) z{^3B!Vm?T(+my6sH@j1Z&r2}1M4Q%^Mf!>F==pP;w0{wxXSvtLh@Sn%KIjexXU@A+ zD#J-uw-zI_DPQ-F+$C1o7Y~o=&KSLjp zwY@~Xd=fFWL`&D^I>}zDC5@y%RZK5X*I^WoFk+X$3pp#GY5}4@@yy{T(~#;G_=o$d zL90LKe`gkbicXrfcs*wrsbkjYd-Mhzk)Wn#v`C zo+owplLsF@M&u3KfqKFW@|5q@DZ3j(9jBGzlkh-Ek_|nbYl(2>B?F5y^%LgxCA>S1 z1Yd$sM{~? 
z0JeWMxWr*t8MSQMo9U+L+s;jH#RCS=nnnMO1|_&EFaqs*#ZAmQxqM`-(6~qLS~n9r z_gYNDi|*Y@AuhA2Tz)g$&HlV>d&Jg;D}Pnj4t{Clf#WBTbIuTNS{TqqydVd1X2)2Q z!)K-KnEJ++^NQ1d5wg7d+=B=YBGpetm^|RO;K!qKrK!9uD=96Ut}3{SyIaArqP_Hi z6r>ptxN0p8i)(*=g*NWt)Ql7P7E|r*c@l;0sid;K)zNBJLR|hisjb;biLdE~^?!W5 zwPOdNp{7g|a^WaTZ6_(S$Q;}!-8n*XfPm7?=&m2#JsRomE(ZdFNQX4iGDKvAA|Z9& z&-n+=-MRl>JvYzrC(!UGP+ApJCuXX7IPB}avXpA&ZdkPcR-e~=Cl((!bz04N=udqp zKEi{&0RcQAY!3yw3juG9wN^LxITJf`ssO)xQkWK~_+~}@&rO(w@V4(a00d$2V<2f_ z5-hl$Hn^?2U^v%*rt_nY)+D#VwSXq*4vP%ZpzCv#aa0i&Gapm zZ#I~kIpIr&kQr79;vle=g5h-%rs`_ZU$dU*JB-B!T(V0z1ehBLHiV!+iUH*8%7<=oKer)B7zdGgU~w)1=oNtPK`&n`K+NDta9IVQ7q}BqZj6s95V; zVig#oHG2P;Oyg3r-YT(snRsG2-M(8$cCwVy;|yO0J`&PERPG7*_lDo+yje0@_f#dT ze=$W}Q40Nl@1<$6y;6|EY0&;xq#OKy=SWG*f)6Pt>KNiCP6m_w%94`^rUtyNjh%h9 zEfz{4ERY@%jTes=jJ@m>oiw{-YO`&m9+@NcGg+x4oxwwAB#w~KH6m}V!T4k+Q*0hX zDT)g}l}yTeedl#SyZBR}s;STo^1LmEc=e64Cxg&yvvCicBSkWUx-UPE;p7YrQUKiNwinz z=y73s`xAU(3|J}bsDT(_xpH%?8#`A`+2T`dxg1lLH=#e#Pw&SqmE?6jdK>vj0LlC7 zR6&2HLF-Bp+W*IAO0w3^3PuqrqI-Da$hK|NXgY+T7UgKamxTYnL_1#Vs5nk{e`jJ} z_s%Q`n;lJm2g{Y7*u@4CM1kJm0#-=4KxH!4-(F8>FL6}awM8_Q8wIyfz_u8yQ<37- zKExbCm+5bqsXn(7WZ9>@vrF8lP^V}8;lgU8v3=F;-DvwD{gW!i4vXftv}~99?0=&n<=|k*@u0r#J9)?NK8vZ) zDX}28+NTZt5m^{)Exz}-rt}TQ2svyUVCipmr+WW{^AXmx0!z{fIfI$EO!}#KE~k>c zjia?=@gy&`n@60f1Rh;CFV(%Z|BFhv$bI*0FX`RuD6FJRd!__ar zgBVY@Jed9P7V8gXfuEIE`1dXZwQyrst>Aw`8R&p)aN2d0g}{($WKdX8i zBnhn48ONF7n&RAql~6#tHc8+`!0BQz^W@%at=4 zPsJJkri@_^f>m7(LdA7h55-XAYt7}zz3;tFWvJ!$WyJSq?>ayv=e2*pMnwIG4i;ml zxOpV97j$)Z43No!`ys8bwXA@=`6oKTD#BRAvcWk7eT&__GfQNwV_ zBbo?lY@D`2iSnOs5K0DdUT)n*XZ_}CuYp)~b=)AF% zk!hqaD_k@rqmSrs^pMiD7%<-w0fs4}V?!3uranTgvA+zb)Dxx*@ zeu|*jfx)>9!S)!<3QRyWP@$V5qrBaxllTd0FMdfA+^O=|j8BbV$E1%;dLGfemcwtOFM-=%>H10KU#hl&uB@I| zL~X9LsMXg5uV|u`D|kd)&MBF_DDg@;7&G3s^k0UKrt0+XkLNoEpW1}MVm-DU`}iZC zX*fOI;4olKCp-@9(E>^kD|zE=RY70liaoVjlfg8trFYPQ+czGF6Ixt$pYtM1aUZ2T zwEA4>2_9yoKPw&ZOT*2`|8ft8zt}OUTl+2`SX1u)m?_|SWc-{}*5}PgyLaQF5Tk|A z;Lw0-PjM_BozUjmu2p@qyDm&b|Eu?@Wgm1z3LOF6sl|XNMGH59aecXrQuw3x661-E 
ziC#D6^e2A{LC*yzX+fX{Xc*l_SJmN=-tL1I-V6U|#;l~j6YV-s-*QE}mt1EJ6pyZi zza06w<+RY7o7J(Yw8Uvz$+d+lD{i1tPDEHqM4$zR4@#P zs5=2WrWYQ|J<^|z3=>)(P=Us35outd(VctpH-P}g#YO%=-ypHy5Vhrm5goH)PIz-Jn#QdY zu(^Bvi#0W>n0Ls}c{Qc0!OAHy7%7^btAwvT7@3FC{chx#kXiRlP-o0082^TPZaGy? z+`W4BN|aBTXfOG_W(GoUY>Jwlh?25Qb1c+mGD>NHqxq?4eGB0C zr^)WY$S%dTkt>c;l&7YQxi@yJJ``5LZozv7CPAq0YVm3?Y3GaP0ecKJ%xhw8W@=dV z34~Mrm}uYI+KW`LxdQq@X-2YzuLt9d=oEh1t$>Eecs$cf@T-8kaUr7c%oHyLr|Q&z zq2J0L=IOoObPi4k@8OykVc$N(`T=Bk9MQjy1A_P#fuR(N!kun~WFlrPq){chf>gf` zonsoda74YC+DG1b8-9M9UI)wJ_8#*m`{y~MT;%Hwud1COZW#o{zt#{QX^;i-c%3Es z1iEfHazE9Og}wJM^!M@4RhwtbbN3&S^c)I$S*g7`zT$QHfFB4|^8HRa4p60_*(aEo zH}WwF6WZOlilMU(ag3jmO9WKPuMPzE(^={YL3Qwc>wOJlu(kVu@;N z4C5}69XRa1c!SLquCP;8f@-L7+h9P{qc(in6p0_07Pro{47^u?T;wRIU=)#8r3 zmHfNw5U+XA{jg2}bkoC`#);?@j=1R$H<6Txq2%&>;|7Iv^2tM7Vw}dp{iE4Z-w1ID zg>XKN`vR!(rVZA`tRl)I)IBFEFB4-X{oxHh$){VWgTYby?~@XMYP241>G=T}klxm% z@uCE%l}Zd|OgkOkkNM7xxnZh~USr+hfNJDC^o4)RBPUgOiF{S|7!%%dJFnd-l!z#- z!vzo8JN~}jUDHgpbm*P=F(zB{{aYJK4aU71(f-&7VSTlXh?h`{be5z273)D&Os9=Z zOxAmUSvPt#0ptVD%e2Pwp_B-gN_gBXZPDE8&0PwA*1R1xT(4_&mk z4OWSG6#nAu*_Kuj(>nCjC`@vb7Bu^OeT!!RaZhs9Xp0}*{xxQw zx{S{KLq0_EV-j|wat68&8^3VQqB%}NpZG9OZx;ZZI5ufAsb^JoELJ^Ce1Dr9lq~rM zWRMnmTK~M;RTpgUBFFcxUI=H^#go4&cPYXXQ*a9UqEX$&KvOh)y*=9t1+QegPS9G`hTachtx{O;E zQwC3}MBczulz&j9+QVOad6___u%{8@DRIejjF-5ghWnD~F%lCDxZZ5PgmLv(etlfS zO?4A!6ow}npX!II*q=8oT}>AB#1ZF_8@e&3aYKr+9H>+nC&8b{9}8C(8p>V91q+#g zkDn(MM4SXRURrsDP`lW03f(F}TV$#~+ zSz4Aly`pfcURc}v1ay9@n2K$)+=nk+bGVN9W!U`XvjHh4A1ydE2B$L04g2}6bf%ym z0h4x*KV#HGmLHQ+e#rPlIPlHzlNnqBGwmCFaLrX%q#ZILLqU>Z=`Za=7F5L}%f73R zbA{N9*n&1Y&B2}4&bHcI{b}-EezE4BP;aC}$x(b~hG^*Ta`Z9IW6I3R!HxG#YIdJq z`pSq2mENX+Zz|GhDpS(C^ws$9iBws=dgPt1^R2zYz@_^Wkz^{O8B-&DcQYb@D}Io= zG1F%Bb_Xv@$$G#3{X0I|_!$q-ybIM=G%?ird)$IE{vA20fP7MCjw<&Mjm)^Ojuz!r zFAllk2rhD-PT=Rx<+mmyf?)mmNj}V?m?3k)V{hj~vd9;R@LaS?w|{U~s(P>Rvsxgx zt$<)CWSrzuLi4%x(e>^0iJ&#mvo;u7&gXH}J!K;tozYA+a%gGt&)~@#e|biKt}lNQ z@nz*SZ&twALC1S7sG)Jusxe1eW^)0d*159%SgD@c;SGvhO)>i9is`4!9#RQ%M>qam 
zt>vM#63cHkF=jMyi6KQr(^oF69dN~#vFtc9l?QTZ`EZf9%`2tX73p<;{TA9+wxcT$gLKApSU~DQnmven+i@6fsR^&o&tdu$IW6F6S zX1=1C8*Gq!Lnm?gZ|Oy&KjSMAkH)^5RBjr*Q5jisYKGf|PdQfvYnv(|x!@3@QC6fb z#LJC;fu*oRXzH#5yaRBYv^o+oV6QY2p6+Z~E)@u`(gijJ1g};MBPl5k<&~QTb}m-} z@h*Y`n$2wNFZ;!mS+frf&MTXfE~&uP;%)Z==Ebnq;5Vk84xyWiP8J&Sg&zY%=S_%^ zA7?Iw+UozP6gpC#4k>3?!SPZh-P^(6G4}S9{GM?}!XJF1%>gU~liXX}>T`L{f!L|* z#J;D{W3Gad!%9B4=yvkX_8D*Za*Xo}qaH9z1 zIQXA{Q;t0eG_!}XU>&b8V-ILYhc4D)?0-?I zYeBop;f;GVcZ?Kuej&}5Vfc{!2ER>9s&{S2c%-4(UASD{E{b3w}DoHzwD0x9_Fo<9itKg2Yf5< zSSEueo=`h@%WV3t7;4hL1x0DFlX$0Gn}&(^4D;>D`l(%%AJ_ O6XIiFU?7Gq!u|(}l}vsB delta 5374 zcmciD)msw`pn!2ox?5ypNK2O=0#8|fhG!M9^?dy=z5GQ_*VSdWQg)O`8H1Uz(|K=&e1M~ z*58cN=V=rW6!EYc*Na^9>JK^gKfTiWS{COFZkkJ%Yf1i91&f_Ovj@!yctU59L?d?< z4257;SkM!-8SjU(%KX`WUy&sxbs>aWtIVkr|c*g5aZ9o0kwZy~)8P3?dC`cRma*~|} z5)PUSIDu@avYxK=E=+)NB!IN|CNgvPYCCq6ox5T^(I|%YHGR;k8qOo1qUCNXk%F9g zfQOS?Ic^Bl-Ys$Imy0fpA$1lXL}u?bIm{Z!6=(Wwg1v-o<>aaSPbUreua6DdL%vW( zD1$mX{@DaqfBxYd=LQwS1|a@EnTK&4-Dj9|OtD5IXrUlPQrgMy#B?hy?3>jz9~||H zyCr!JZhya1L0waM85~?tfwxZogu$=pf+*rA?2}j~c4)*{;cr`h_t(0}8&3xUT#)qK zUidW+B_tEz<1tOEQCud%Qf5!={8ob^g=;ceO?Md1^qjZjP7eD25{=&KW!S_m{c5EM z!e~>pnOT2lLJ|kF_hkib;SYNd@^%5KN>Y&b6KTKn7|_(Z;`HrjTKRxp&$jYBj&o#P z*sB48L>?~c*3&f|4X^UHkJxV*B{lU{+Pnokl}7`V+%AHdK^~rS;;7kOmpwTjm(zZA zrrpeD$5-NgFBGWEJ~q6VQvq3`OtDMDK#RdxEAmIY9jfCpJ^Efz0sQpe3Zp0x&Q=N_ zg3vfbs#1NM@sv5{GL*aY<&h;0npt$IHRPBd^er5Zg8icG0{rLivTFN9)~x< ziR#p$-k>O&#mv2QiU1ae_X$*!Eb11Uh5q^{s(WrEG6BAEA?6^E(gHszUtt-`2SOSN zZ1?2vXZt**20FvI;)P;q3uPQ2cFa^k!E~5*_o$vQJ>A?Q1fxbyJ2b(lW+Y6M^_k=77#ZJB%MbS(CE=yhZ=891Vcub&DuEC3>aic)3Lr{#Sbc{v zj9)V`n_n9+t)U#c;^R*=p)09^S89MkpLhvF!4iAYIG&IC+9++O4WS6E{Hy-H41EV!UhB#*THl?7S8Bh2 zMNUSxhh~UOhM~l#@Y9}#c<2B{?!x(?iVJ>pc#=DY9KJt{o87b)0I$R_boyl^cu5!u zKBq>1bpvN*S89wnc>RxxOy(3tA%es$*p)ZQ|KQbg%Tn>Wp{|hqzZTv5(4zbT-@E7@ z+t}9Og*;~yA%Ml@FAD_3SVyn6Q=jmu1fH*`;^>#&TIYMZ`j8a}&l$awF@xpy ziO)US%%>w>rxFiFKVfgtgPRu`TE^?G142zYE0epFf_`)e^D#IwpnLr2ilwg?d96IF 
z_#|3zJ8!vIb;CSd(sPd@?mK1THqtT@pJiD^h#&BHK|_khAKn_vTS4~!T&Iygc{V2jiq-9(LX!t^ri4&|JU)Vaoi@P%N0F( z;jYWEulphM@V;1(bGyK=k-Q_&yK!X`9hIfgu3Qh4audDywX9iG8^x?kOPJQ4sC=$2 zt9M#HnG&mJ=IikGL+5L$Dt-YYMajUgb=LTsWoIHno3pTch9RYr-AH6Bio*cj{6YUi zVAn45Mq~>9McYXi_UN7yQcq!&K!WRxh-G@Te%;Xeu?>go_Cxv!x7Koq)j!LkO=zBv zZX8?mioQ$jCU;eu@s#@PcVD{q8EOE6j?z!uwIo?%%c8ymxsg&n+@XeZ44a#!@~=0o zjR3MLdtxxxuBEi+x=js;Ncmc}#Zysss`3_O{c1ksc*=d~rbM{n2gfoftbO%HfWNDi zo>G-ktO)R$ktErV&U9V4ICr;~`0;kEhr%x5?RUplve|DbE#HML)ylUj^KuGmoOqy< zLH8}*%;_T{)2)^`J*=U2SKXiTlfe$v(fXZGGy}{cM|SQq*ja_P*;D1}N#*IMo>T7w zu1|F=tJ>~a5t*ECTy4@emg9e{+5Qqsey_o$X>1_m!L*6p);O&Ex8qjC8o>4KLg`E2 zyH{=N8@)ZtyXkz5-+M1)C=eyT3*OAkrN~Ul1QOqah#5E)KMV&ye3HYL+MVXmUf7Lk z63h;RWzhUaF|~Lm4nCAFjwIFj;o?MG25@Cd6V-Xvren00ZbS$(QkPymQ8*mbR8ec*E24r6tzeX; z_zLCkB_n#{PWX6(9W!u&t~s^po-xt;e^+MmopM^Xe1c}0>-C=nU77136E#>8 zSQdv^M_#FDSq0^}RA^j_F7`HMWgwVD?krCayuR6zRaLk?;?!cE$8D&#hy9(^Te$s1 z(@xO9hm-d{J9~b{@h5ePL0c5r2|IaRjzOCuiE(Yc^NFui8VlCbVhf{~k7rAUNKbo8 zES6}(H|Xq;LG{ojd&VSFUo`3ORK$j&zRX-qZ(d!d()+T{9P(joPSDm}1}BP#ZAkTW z>!cdfD&2o?65h}rP2x0K51R%?7U|pWxRrY;&;h%=rK$xo8g8d8pN1vB%uzSpafqoq zsN~oqJadX0A~&7U8?^ogUcb&pz!^`H&G14!sQ74|i4uM4oBz&Z1FJ7Cd}mZ*I-)lL z<%o3Eiz@qdw*-f?=a}$|9UlyfXn8a)=Pm#><%ix@(P=cB?!8@wRVT73Rp!}7(Y8>H z0>ZQ^wwN4lFSS&SdG4i%MP^8Y3bzoWay%{~ug4@+2*|9gfF|Xi-9K~1rU!g77k|nY z%u<5%^+DLaev(iueq(0@veLsndu01b-=V5wM>^{Vi78UaCXdNfg!!<1b4n4_C!}53 zAdoU|&J_Pg#O`-1jBiCV?@%iuZwK(ou>TREdW+u8^xEuQuwP$$bT^FvuhH2HaAN;% zQP7fr*-1;PBKk*?R(bsAI|`9cieLHuy?|;p+|M**T;a*GkUSuKFDW*?;k5Xq>cE&n zm3xG!=%KK@e6wcj^{3XJ{Yg9y>&LD_&U62Sa<_Y=GHv2N!=xq-neE34tqu<29}X6W zdP?L^0vGW@>$A5pNZR7R=EnK)0XDOnZg+onSE~QA^Rq3qu9tZ#z6FqWxum&da*^I> zuCnEg9BMw!3Lh`h730KFXka09Y%wvIu*g3Cx;i^sKNTds#vqW%!$8D3WN+lRnf*h# zZ~lW~&JyR42Q2lcqwM`TurxHD!8HLluIPJ7N!20m3h%2nLVw=Fx%Jw@Xa1hZI5G|B z77rd<7~YA_S#j97G6ICw%#@4W6-2IhSdK3Nr3m5cy}h1dlkI?s0Nko2Mk-0=4O+@Iji_V#+x?)WfecotI6RWRexshWWDF@9RK{0tW}1{7K)tm zE3NRW?91_okLa@_V7}*L*wlQmV9W-i>O>g5T~dc*7=5(&sPDON>{NA|f>{SU3)WUe 
zN)l$+b*0H1m3>DRs3+}*o4mp?%mXAmp1X{MSz05rn^ny*dRk<_Cf{RS_9({pzl&wW zzr^--bl1P+#s>TJ)p~69LEXFVpoO($`wSHNHuo)VPmPGK zhErc^kSvr%ptDcTEA!ltZEi+J&ifOI3jG+mRf#s8!Mqs zhDUAPWWH(1Q~EdhTGM#Vqk82`iLkPx$06Q&l$y>gMPFr4f}6%`p&nXfxHFQs9B2e=C6O&FMRJ)lHn#L{=BNX-{+g!s>ExU{+VNd zRWYKLSTB0;h}b()brV8yNqX*>zb(4PdiXfev?)wki<*5<(v*_bG?uR^zI^k2y1{;S z_uO9G&6STv_n3Z+I~tp}QA?g`rW_wv{HxSQ`+g-e3TaL*F2`=5z_1r;@rH`l?6*qm z4c{%X8b@TULwOS1eglu8{F?*}S;x&!XCAP5+#C()82d7GoKTk&%skacDi&k*5TW>$ z>+P6o{%3AC2xvF0zO=>M@A{-)5LX7%)i^n&?N2Q{%bu269}>GRwoLfFuBg18~L zi{}W(7K)PSX-Hk;<8wCacwvko3GP)mq`G%j5Sbb#FZF1v`c5|8cLIi*i;DS|7l^C> z$SOXbK#Q?)30a{*{N0SOddTw>x&=Us>@eg$;eABdY0PeiFMq9cDh*&Jk#B9gi5DZ+NI-hP88m2XsGvE-x7Ym55ff4r zmgn|rEf3aH5SQkqw?$D0?DSHn#uW71v&|i+-x4*DV##jMZoUceSQt-X^u(V;(??P zjGR$JB_;gFuFV%_&z}+ApMScp1gkI}@~I(&=Z$VM=0E<;M1x}ruL8lyv>52toXNJd5UAN!%X}^hP|ueO zr}MMhPm92uLm=+$zazAqJ*ML&hDg`51-V**$kp0mRJh zx%i8r8=XYIR{|q@y;~|^Mxw3Kp}^pN^r?U)=v@tFB*}@JE2Z(saDA|#kp8^j`9`xq zqR3nD0KIqyrms3gVN6@9b6f+++9_o8I5$tr{s884njBKyxZkK-vJ#g?McZqq*;Buh z`k{U~;n`VF;JLX;g1PDxo{!Fh3Sr&W70=2)^0|-vEcOf<%?|4}knf$viAPhy2?gkMgr1QT2 z;NFk-ADriWIOjPR^EnqYMH;d|(ozTcD_NyFkcVpqjuSFq^(-Uo?sDoIAcU9q{C31f zY~r*b$uV6i;518>O|cm-MJa|Kv@Qmd#u}!0s)l6W79?S~QShrT*noi=l5hq*4!N_@ zm=5FmckBVsyQ4%?O5zVx{zhu8(u!c%=;-s~aK1cm9nxlfd1hoT$x_b(@vnDzV{zH|86;GsVGc zZAyD=ezds3d`(st_Oj8 z@)3o@Vs6K_v4YNaRCnX%#{^o$XLintK-kiT@f;TzC2dO8LY=-$d$}(6opSPeb>6S` z+PW*Nbl-2M?{FOgc8QGWT0|0ibbaDn6G=^*>`{QDcaJ-1+A6@|aN8tiOE8gMf{04S zl9I(f?%N-Y4J0bF!LE)4@s7G&bG*7l!b;$!!PM!K!2HFIM*-pQ6N>$4*i-3IO=I*t zr&8zq$Q}uJh*=U|x_v4H)yXH?cOfsg>4L3G=)t5>c;fo}ow|JBPl7DK2z~upXVsbD z`6wY_P8&IO-qs0%91H3Gabw>?W27oQ;{Tw-rNyo(k}12WR9ND#vLcs%?r9c@@`0!E zBtUdz>+7*H*s^M8{QRLp9BN!Oyg9pl@`~|A3tT%+PIN( zV^EsChZ{67cHgk}6Ce`V%>xG0zW7Q?`NQ^iM>NKDNNjP3*EekO-nRs1ClOXoF7>I} z$hDXMUv#u&8Cd1d=t@OPFikh!+XwlL&Aa=A->(YR4@xm5Zc2b*6p0dart03FF;X+F zM&YxnzlhnJ^EmRlLCQFyvs_AGaz<$sd!PC3Mv|Z-U0j5`jos9zqRGSWKwIO?(YOX# zaMb5m50g*XOJO~(0JO=_jVjqXr(fO+07Ipqs-9wZME5VVI&)OQA 
zJ8Fq(W_8)_N9po!-?N-_o7G;TlMt=+$%AhQ;%!(WIL1PQWF}BA2j6sA4E>P0vK#qn z2zyO@Hv`}u6ZVjy+LI6;Jhbv?uhvh1+Gjf>b3F|Ceb?#TdDJF0den*w_9DXW*zgK` zM95$m{5};}@i47kGjE4I4dx}BC6>F>wg$GXZl9fzdmk{jFQmMMg*a_w4X^uyk*i21 zGD@Tz3x3PaD>h3e^jkPGaH3V2-ZcSrqr3{uOsrv6-C>Ibjhkn5aaDtM=f7_GceE~? zM-!8o1go!y{HgLb)xRQq{McsH#~<7Ep~oz+x?28zn0qErG$cQK{LQ}h2hD;WXC%YB`h%ONWDN=lL&#q}8}i9;>|{_dI~waa_UMhrVju6n-~GC_b#Lw$c75h~ zpf1SyE(+N#^&bTN_M%mA2ZOOxb=sdTz~shgv?r~R`Dp@q7#dm)VWZHM3nYjX=~#?^s7{>89&y8>#Yp_$p-+wL|Y zMS$9!M%8e7Q&#k$D(v>Snag$m0djw``Sy< z@0fuRhPcoma}}0O>+n{t@BZ30SC|&u#}4F#VUWmb>iM^@&s9Qj0A}#K7xj&uPX4nP zyQD{5PdJss>8by$>*f4l8xv3OJgA87Y$XX~?S=<=;w-}>b)I^}! zZWjl(`!^PKDK|p-LduL~Yjvu4%zdAP>75lUW?q#NqZen6C9jfWfj zm~`?LXFZ^HAw%B#1mj7Xz`UqZucPbMdmX+C=!OwI`d9tm_+gAL8yff^p>$YCqBUk@?Noc+CJ%7$j+8R z7LXybp##&|IqU`+^JfOACs3Hcs!Ju_y223~9|Uls2gIm<-8vnsPCWK(oBb%zTrmjH)hoBgE08jEjs7J5)eLn2 z=Hv+$2D&R-Ng_($qm;lje7W-&LaolkZvXJmbI+5_(gRqZIIQ0_Ojzp*FTe9@_X&G& zpYlv_$c(w}M5#ECqaN?wLj0^JD+NM-mARhQ&wehLW`M`%B?n~+Es}+Q=#^FSH*dx1 z_Eg4O(eC2-S9lTf6LZn#Az>1Ws3nvw220CY=I=Al9!PGq+L z_KK-PW6O=0nuERUU3i0c|#HKnfm&p@{LEk=V;3|img3|5Gi`uPY21< zLRDtdg~@#Dr!pA?xrd}e2A3PTrzR=V@GV|+wu-NbD~<^LOL($MfcmZq0S(e`<6lC1 zBtH1Q&B!47K&u<>v8;Cu^JYQ3)IOBld1sdwgMz#MyFa;j!blDxI!-_aa@ zqBGKeo$kVw-M;+y$zf%byCmJ^7{6WOCMg8ugIcmY?k!RDCzHjpT0N5@mz`dr&|HCT zg-vxeJ&!U)o;}mw|mJ!$61Xom3Il--nG5khsdWRO$kH@|U*3KbMl$@Hk z$H$_3yk~Sjx0McDsngG-{XwrMG=W2%ZyFTy0&Hr<8HYdmemzGR*2G87VC|U!)MoZ+ zq*JUOdxVw(*M`be&uSdWW+6j9m*i&07cI}d2?s!psS0W$2r&lSYT>L-w=wD%?pLoh zBWmu5%%8x_PUKs!kEaZ>9ovZAT{(RGFh#jd-}O8@HAtMt9vEjd8);51Bprztv^MxX zi`J_ZMC<7>uar=8<#-vW_`N4gX7h`lV~WKWX1jNH@_^m1FIjnl@$}ztDcMAO3;iZZ z7*Du)MM9zF79`kDfYP$3zltk4f#3bB;b`ZOl=m^-j_CYS!^#JjHtXomTo}Wfi1IA; zCVF^l)r_Uue>Jk`d1Hd+c3=KoLI2*B0?+yaBVr8Pc-Z`ix{0>_b=T4=NgdEOC{f3Q zgesX|06Uvrq&01cW0jq@R}5q<%BU){*Y7okc+0poMrZS(IZ|-&tC@4E;UQTo?vfcA zMz1&qiTY%o)Hj3Xml87v-yN~;Wf-%mwYUyBsOw;U!{z2kz0OGI5b@6xBqv2~oqO|z zs=rO02L2WxZZ5bL9}(6~;bW3ER=u<@5`N^tYG^`>uB6gX$`xDyeVK4Rlu{hm;}WSS z|AaZy<5A|r2O^dl|Is;b 
zvD1&0SiY>RoD>ci9aj4Y&gQyfo~>bekM0)sI}-kQb=EKy|3oIp7+UhQU`b7S`}Hez zy>8$@&@rx(hOgQg!N_{ef!IU7?AB-ATn48!)65%NY*$5nI+{!^xCXJz!rOk7`nk5rVeMt1#J|LY8O z*@I!f)#;CRlR0Ja^f%Q6W>uy;OoO+2Rc#-S+EAhV5;^IC8BI#rtG$Za^|8#}7L5n# zNlsT&&R%jpZm3HX+;oSd6un>OO33oVxcxc(#tG;RU* zbmKXPg=Cp<@pod!yV-p9rI844#pP+ErPmIRnhoMh-1zV^BW>8s4~pFY=N$#i+84Sw z@n;y@rpg>HTf3oJu~?lQ?ehswW<`eb<_gd+Z6mTs z2P>d+6F8NM*m9Eny8psiS-HzU^Y|Hg;ddTT=f?g=cm>FW=R{tYbUM)5zuIY{aN72u zr};>#hJtGWL#4MB4!+wQ32LG7DI_DI>9@BQ0IJUPR0N|VwXndgGMV8oRn zDJ|znYH9Z=2_1m{a_Zm>(0%0To6{jRXmJ8o=)?c?HzT0IfMSUY4WnAvjKI7+k$ql) zL)(@pHubEEDW(tst!ywIZ>z*CLBMG#`(l$2#=*FocH8b$ zxG!J?TkT0fM*v>W7(T-;>H4dLEZlOQC7r$`ZF5(TNCRIy@O$0)MkOX%7l`BF*&v&Z z1JtE_R{UNK3fmwt_6-#-25eXazWQZSVa|8a2f)M`C1iBVPSrREvmU{SNaR1>5dPH7 z)k=w9#jekyFD$i7ai3>!>4y?s-daVA9t*b1JQ3x`)8W6rPi!=n0a4wC8Iu2PS$cKY h99#G&`~~7cFhuThPXMhews{Y83)a8EL=e65`5*j4<7fZ? delta 4648 zcmd6o_dgU4z<{&j?3s~qc2+oBvPrTt4q0V{WOGs-vghSEve%ux_a=J_*~B?IBN=h_ zz2E=feSduagXi;nKF{aDXAs~s2w;lbwK?DY10H^ch$`}dl?qxudRjw9q`FdX(zVX$ z=a7=miQtm5C=-B{8#qx#{ zo9hyn6gw*8CqVz<0ZfCYPdD(iQWoeAl&)F6@qAli=qv+218V$9XsPMsoq|iGF0&G$ za#?|&sQ8EG<)(DKh)fUpWvzT99yBa+Bbmb#2Iw%14Dvj{{$7uS_SpyR_}OPuKN$Mh z<2!-{x@l+uQ}%ZT!(6q2g!Pe3%|y^tNIZLC-&sAq8D*A3fDsb0`LwR#<&)*eeh)RMokmxdlU~E3PeO%R-UqJ9&Ga zlorXc|1{Y!Q+k>Rc(NiV$UZ_U`qs$gO}a2P@V62>E$LNBIX$Fna-p(P2KYDUjko;2 zxI4p>Cu~|`E52?0O;H?>Uw}-pJSFDBJL~)Pnr!g@gAR87t}3w1kjqZC-a83O%7ZDL z#xYGc;iyBEmFaRta$8`ivh@Uc@rRZBVEGpQ3L421WK{7%Pe1*PcImBs(G3+)Mns19 zfu_9z=3+axLGQCkK9`ZxroX-7Khl zAJ9f{nf#ry?=eu;uE3ooXam6~B`<`h2vZHi!DCg3-C8Ew_;E?^BHjW;O39511H>T*(iaJu30UG*L)yJ49W#|+CenI+3(vcOFV5YX!if-34!s^s)SV0-SA? z-;Rt-oO;B);hG>-;GD_DHg9|x+Ib*raq@8v8~W96(!V5rSpo+Dp(dNzp+`JhWt{V7TW^Oe zdb$R3j=t8u{9;PU`w{PP3=vhM;Hf6j@y!{N2ASW?I6dO;f5TGL|CvXVh=!YEf=fPR zSg+xGeWX+oA#~MR1jY#%$9g#q|NBdu9l6}q7VW}&SRtn4Q5193dS<;ZKRccCYOydo z05w&aFxAi#zkSaa3ik1F11s7Jx^>)165!-Uk?%j(-`1DPrV>I~gBvcOo?PEYJDz_? 
z_i=9r7#EQ^+Fr(HW>UT3eIhWEFn~8ueD{R=tOh;r3hHTX^}z^!RsBvhbx6||?pkpX z(3$;g8{;E)W{4BrzXcX-FXZOv>k1=t_4btVM42jh7wS%9QN2ZR`fr+is5Z#f(bwRB zFx;^>R$JoH>&W^}h5sO6Z3wbTvlmpM_dYFg*yiD!$ZF0a9tPEdGuU=J<-_P6;UbO| z+!VuCdN}kq`;Y$is3xZ9Pw=UR&N2FqN2kn3=57~Ikid*Df>FLPmB_ws^n}h1yaMXS zrY_We-GxBI(suQYb#24@3XD}u0&8%Kji%CprT};kOHxV5VEqrJ{~&btV%`s#%rLEz zD{2|bslWWmctV{0m)2|p+?&B3oo@ra;W8)q1{p83b=XNwbK>i3FM@1*c(t$ovf%UQ zlo73V;90LG=B@Kp)S|3}XZF|Y%}OHd2DS7)#I~;^e%K>OK#-^T>3MeA5Z~S%=LGE@ z>6V)`Q4#`n!S4G=&0?)3L0}o`lqn8-(UHI{xUma2P~2wqMxzT@L94nH_|D( zd5TuXyz?j@P?MIHoxV!&ZyZWKeRBMv5_UF*JM0G4f(*B%5c0tRNUH-=f=|BrV$O&5 zmWb`TkL4X+xVSm?&Hh_c3RRB+H67`F2d>b)g+pZODDBb(3-tJ2&_ zUm)z?c}XvbQ}uOvJ_oW=%`i*wM~cXTS5tq9C0sgj`NsW{TEJh>bZPW!UcEFxTCM%7 zCT39<;*-SCK0M%nK#z$Rp4|t8GkAXDYm>ZdNolr?KEf=Yh$Tn2v?Q!(n)!DjoFi#43a&+^}DS502^ zE>VwliLbe$ETP}O6_X%ERYOel&4HrSf?Lk&rmy1C*lw06lzY73S|*aLVWf4Gc`fGZ z+u?PCmcp|^1zF%S#+&bZWt6%qDID%W3t@Fw+P=B0-_u0>TjrxW&59SG`$Wf!p}uziQ2cNdd?)Pm;k_tvDS!7EcF@E9lj>85LIU5BcM=N^5?X=?_f1 zPvZ7A-o53;hqBHhn;B-jpCGSZ4%3lO(&|?m!3k>DlKlz``;jKzq+*@_k<_{SFB>8_x68x z0Q=>3$D!@HzN1Ag-ff{5Ye_s^1+8u~%@90M`m@pu7uHj8fjfTv1 z4IQxkJV8GzF)4qawh4{LN9h+!6*K%TyiCmJsWO8+VjU8G@;e+OA+9PrM2SjL9YzvFQH=g{;RUv=rPT8r;6*{4qj$((k#jy>@|urXi7^Lm>$ z3crj4v+PGEiV-nAmE$ukJ1!U{hH2r@)Fetrg-rVtr_s$-Ft;IX(3kroHx!b?+RI&d zTK}SADr+Ujh#VU&qDiIf_0IXo*z*Z)=q2+VD-&2nE; zpyYKvWyI?JC4AR2c5S6@GLjjX3RL(aTu-{w>ij_#?sl)wO&vEc@03O6XL9 z-NTJDGOK;J&kOPqWM;wWF7TDyoAH|8<=q*p{N~5!!odae@31lZ-OLk8D?DU?5Iol7 z5DoUhagS}v)gIeFY}dEYKsv~%^y}&HqJ~`)7Q*e1rcxNIs);9xK@X2XNm@Eh3=(cv zji_zg!ZN(El?0Xz=XEaW%fOP=CJXFLbHpY7f799 z9~Eo$LIr=}A}@A2TbX}c!ck7YQltY3E9j#^DWh4QQGU7U~Pu$iK zbv+erCuyd(HV=;|L4h`zs@k88cdo?t8tDagBHcQqds8I;#aQ9^70I7CWTa0-3D{{hw34GI7N diff --git a/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin b/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin index c0a788ee9945314e71b99678948354b00730e1e8..9fa6ccfe2b5ff834dec8619ce7ce41edf11e8329 100644 GIT binary 
patch delta 1001 zcmV(VUgdYBnS6#jL#ul_6C>$QtKfepJLk;u- zAV#Ozm7s<%ujrA&)|+W`bS)wj7=+9^M~Qx(TQUfD%(GVz^a3Drur*sFm|x)(oYg5= zSFf8s@E`BE2%2ASwr~5YG}y?qYZLSWAmr=mWE;|yF6cp|zD(6di9e{u8DfwAvnv>r zz0YSm@w0yxCIf#FbBBmbB8$Q6I^ato%4-7VSYp-x#;1tA7F8ksxxuh<*!L5Ww#UHi z&N(NAtb8K`c4nnS6gxU$ zr?DABXb)$-lkx0~@)7rg-#$0gFJq)8`xg%Ng)3iB#c_YzcRcOXw6<@xc(F?J=%&_~ zJxmG?MJ=+iVV|xwt0kM0fnTtmB&a9*Z7*Vn(t8Bbz!-*`ibKzbioaICq0G0IB0bXg z5tvIddj9WrpqV(WvSD$np;derso0EP-y6TXKMvm%M}2vg_-pj z(U?%*Ut;FR5WG^DxTylKiuB=h6D-{_EnXWS;DDs-DC|mXri_kL{MxAGycP}7tvpVZ zCeEbZcjsuh!m)i4G&-nof_Hm!TeWVux9vneO`?DD%$Zsqw=18xxhS@G#r!zg{1rM+ zluv_yUq+>3d$s=s8$6vMhP|eWgFcm$25||D5RPwITLm#pjF0{_PMRWcPDwqYBa8oX zJEdg_(pYthOW(Lgpd_{rMalcBgW>R0pb5E`DnV~2Q6kX$up6LuqvC1po+Npj9O}8X zNYa06m%@Qqh&gikyMEDI%YAtm&Mdi7$MSD*$^J7C?olOGZiliMTM6ex$X#GZ45UJ= zmZu#JlE0~yIRED~MVL+~k_f3Wdx%U&;{!(*IXERS0L5QCRvjqt>;vz~@WLVkJ7p&D z4u`|e6E>>`m-O7vcln`aImLZ9;1tfr9KL_mmLkH2y;gq^nsYuHZSle~Yah6jE3o38X|?VuTF={SW#{kn!z+g&DJGi%~^! zFB;rMnzBC!w}N$=Ag+f>qyRi(oM_uqr3^37W(Z*eagnWI+n6(-zDnO`-*^R;8t?&P XVtr;Wtk`ZE@7O<FFMF$JLc7*fhXQoQ2*B=xY!%Qx474@3H|;pRmZ46>g+4RV+c64 z^t>cJ<7cT!_@w9KWk^=?0YQ@+I7O{umN;4B$ty#u%?IqA3bdfR_w4t0OZm&rVN6IA zsNe%eLt{siFr!4fry+HGpP*4^jD=d=Guf1Bg;mHJpkaSz3az%Mf+u5s66WwoO;ewy ziByqzDnr+X$cfpsNUj36#L>qLLNapi14y9U_~T={uk3j;k*W8#YUBYKk+0&w{h-bHOi8GTeK(fWJgZFS8C zo`h3faklk~?1thIfL%XjWUf^$20e#sz{nEPi1ThGh!IyFui@AtDJwfg7}+FvJ1M<+ ze-#6wd52segF@SiYApE_a^p}7$wpEw3v|ZNQ96PE{u{c-fQ;f*9v-`Dj&-I zP?P)8+MS9K9!bkEiZsU$csPd;4cQCEgA;mW;6DXovAv`LBg~VpR9P^8=jQse%W5jk zyS{(=lDy`-(J&OYy$1m!dF=KKAR-; zVYg6_ktpUI)=~T8%*2?0KP5(yQX;Yaj@W+{w0Qp|Q>X?U`rAHJ#u_%wagjjhL?zwJ zn;W<#jh0T1rnuM+i=pG7;s%wZjg1iA^7Y-2Q9=7t{;kk?3s*paeDonmuSZ@{WWGfU zr^wt_mM*=cFh5hsW!07rekcCIBh=11<+JO)TK%jV+=8Yh(1Fl^x7#bVXwA7@1G`0K zOSqj~TAxHZ>Ek6Pk+(dN=y_xw36rvAP%Af)r(esLIemkUPJQzNr->jfm;Dij0lwFV XrTYa55kiCU%mM<NCGPtQKJShxO6wDY0hWRe}jO?rU zVrw~$xv}JlpjU=@-QH7f=(ldJ+^o?zZyUq77fq5=8IlSb@M;Xj8-f7t 
ziyx!U1M@3PR4kf?##ywGgaH&Lm%D9Gn5D<1(QWw}Tpcsc@0GLFP|DzK0yIp-gc9xHW#oS(CxvrFtVZh$vgR5;!gjin-AG+J*pv zmG9wb*VTXROxWgz1H+)wIt3--p@h2SR;)*8H%m!5w+E#z)cet?q;{#buquA?w9y)1~Xcs4u@gJa1L%Pw&c z7!h}Y)ehID_XHfkOU*dh5T_&ViZ%4}MQ z)$)Bzbmg83cd{WGw1kW|A?PHke+cU+T0@^<5*9U?V_6~B3r>nXQ3b|CA+TH-Fl#|a z=l~gtt3b47_VQ|XbFV^+^nG3{N2D+JdT)Pa_?|>ZRTIeu=D3>!sLoH9$|i)=29XB` z!|8XO09M*VAJQu+Rfz(u~4FJvHZXJB@imAoQ+*PcOPLt~Z&rHq(*@G{TW)I6>fuap?U;93One0Yt62Lc1Q13v zo)6L|X$vJlp{E``>O6(kbiHBP*%sv~H`Y#PBfeo$Hh8Y^TK=5VDEiC@Lh*W)k-a~z Y!D$ZpN{}RqgSPwJpSjIC3zOs=H1Xx?t^fc4 delta 1002 zcmVhVLk;u- zAZkuxi6FId5BZIw`f|d^x(Cj$2?|%|E4Lg!aVR?Hyt7vj^a3Ew$bR6BsErdKnxuV8 z^Zi>(E31mkE?#c{2CPgJ@#0FeYZLSWAn(tlo+b_6m^1tsB_~FHRmN%0L;i@-TxrxZZC6{VAK?uxbbQvx9p@rEri1~Y#?_%G^+{7J#wu?4O*114JPOoPYlxNFSm*? zZuRCO&Mc$}J><+y&K`}yiPe>pe5-=d6S#>!`>w}GWvzh2XFz}XB~t*l4y(xHn~%sQ zknWw|W2Ohg4rIdqHy-<@TV5%0p0-cHrJR2_XEC)}AOx1RCkW-})l>k{%L&{nhb9<_ z5s~19g^C;DDzo^$5&5*$D$?>b=i#5X3crEy;?~F^|JulQbX_hxC~fdAV<6cwHzE5t6c zl*k;5ua=2!!+@Dl)X1aUl$eBpYJ5-``X(ZJjrK{9 Date: Thu, 28 Sep 2023 11:00:20 +0100 Subject: [PATCH 096/161] Factor out submit_bridge_pool_tx() --- apps/src/lib/cli/client.rs | 26 ++------------------------ apps/src/lib/client/tx.rs | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index bdb5c7e3b7..5d76a90cf5 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,7 +1,6 @@ use color_eyre::eyre::Result; use namada::types::io::Io; -use namada_sdk::tx::dump_tx; -use namada_sdk::{signing, Namada, NamadaImpl}; +use namada_sdk::{Namada, NamadaImpl}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -220,28 +219,7 @@ impl CliApi { client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); let namada = ctx.to_sdk(&client, io); - let tx_args = args.tx.clone(); - let (mut tx, signing_data, _epoch) = - args.clone().build(&namada).await?; - - signing::generate_test_vector(&namada, 
&tx).await?; - - if args.tx.dump_tx { - dump_tx::(io, &args.tx, tx); - } else { - tx::submit_reveal_aux( - &namada, - tx_args.clone(), - &args.sender, - ) - .await?; - - namada - .sign(&mut tx, &tx_args, signing_data) - .await?; - - namada.submit(tx, &tx_args).await?; - } + tx::submit_bridge_pool_tx(&namada, args).await?; } Sub::TxUnjailValidator(TxUnjailValidator(mut args)) => { let client = client.unwrap_or_else(|| { diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 1afefab825..8215eaa078 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -96,6 +96,26 @@ pub async fn submit_reveal_aux<'a>( Ok(()) } +pub async fn submit_bridge_pool_tx<'a, N: Namada<'a>>( + namada: &N, + args: args::EthereumBridgePool, +) -> Result<(), error::Error> { + let tx_args = args.tx.clone(); + let (mut tx, signing_data, _epoch) = args.clone().build(namada).await?; + + signing::generate_test_vector(namada, &tx).await?; + + if args.tx.dump_tx { + tx::dump_tx(namada.io(), &args.tx, tx); + } else { + submit_reveal_aux(namada, tx_args.clone(), &args.sender).await?; + namada.sign(&mut tx, &tx_args, signing_data).await?; + namada.submit(tx, &tx_args).await?; + } + + Ok(()) +} + pub async fn submit_custom<'a, N: Namada<'a>>( namada: &N, args: args::TxCustom, From 5aeabd8983e9dd235a30077de8e034d053d5ddfa Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 13:24:02 +0100 Subject: [PATCH 097/161] Factor out validate_bridge_pool_tx() --- sdk/src/eth_bridge/bridge_pool.rs | 110 +++++++++++++++++++----------- 1 file changed, 72 insertions(+), 38 deletions(-) diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index 4a1664a8e8..403af5fe8c 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -14,6 +14,7 @@ use namada_core::types::eth_abi::Encode; use namada_core::types::eth_bridge_pool::{ GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, }; +use 
namada_core::types::ethereum_events::EthAddress; use namada_core::types::keccak::KeccakHash; use namada_core::types::storage::Epoch; use namada_core::types::token::{Amount, DenominatedAmount}; @@ -41,8 +42,8 @@ use crate::{ }; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. -pub async fn build_bridge_pool_tx<'a>( - context: &impl Namada<'a>, +pub async fn build_bridge_pool_tx( + context: &impl Namada<'_>, args::EthereumBridgePool { tx: tx_args, nut, @@ -56,26 +57,75 @@ pub async fn build_bridge_pool_tx<'a>( code_path, }: args::EthereumBridgePool, ) -> Result<(Tx, SigningTxData, Option), Error> { - let default_signer = Some(sender.clone()); - let signing_data = aux_signing_data( + let sender_ = sender.clone(); + let (transfer, tx_code_hash, signing_data) = futures::try_join!( + validate_bridge_pool_tx( + context, + tx_args.force, + nut, + asset, + recipient, + sender, + amount, + fee_amount, + fee_payer, + fee_token, + ), + query_wasm_code_hash(context, code_path.to_string_lossy()), + aux_signing_data( + context, + &tx_args, + // token owner + Some(sender_.clone()), + // tx signer + Some(sender_), + ), + )?; + + let chain_id = tx_args + .chain_id + .clone() + .ok_or_else(|| Error::Other("No chain id available".into()))?; + + let mut tx = Tx::new(chain_id, tx_args.expiration); + tx.add_code_from_hash(tx_code_hash).add_data(transfer); + + let epoch = prepare_tx( context, &tx_args, - Some(sender.clone()), - default_signer, + &mut tx, + signing_data.fee_payer.clone(), + None, ) .await?; - let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); - let DenominatedAmount { amount, .. } = validate_amount( - context, - amount, - &wrapped_erc20s::token(&asset), - tx_args.force, - ) - .await - .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; + + Ok((tx, signing_data, epoch)) +} + +/// Perform client validation checks on a Bridge pool transfer. 
+#[allow(clippy::too_many_arguments)] +async fn validate_bridge_pool_tx( + context: &impl Namada<'_>, + force: bool, + nut: bool, + asset: EthAddress, + recipient: EthAddress, + sender: Address, + amount: args::InputAmount, + fee_amount: args::InputAmount, + fee_payer: Option
, + fee_token: Address, +) -> Result { + let DenominatedAmount { amount, .. } = + validate_amount(context, amount, &wrapped_erc20s::token(&asset), force) + .await + .map_err(|e| { + Error::Other(format!("Failed to validate amount. {}", e)) + })?; + let DenominatedAmount { amount: fee_amount, .. - } = validate_amount(context, fee_amount, &fee_token, tx_args.force) + } = validate_amount(context, fee_amount, &fee_token, force) .await .map_err(|e| { Error::Other(format!( @@ -83,6 +133,8 @@ pub async fn build_bridge_pool_tx<'a>( e )) })?; + + let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let transfer = PendingTransfer { transfer: TransferToEthereum { asset, @@ -102,28 +154,11 @@ pub async fn build_bridge_pool_tx<'a>( }, }; - let tx_code_hash = - query_wasm_code_hash(context, code_path.to_string_lossy()).await?; - - let chain_id = tx_args - .chain_id - .clone() - .ok_or_else(|| Error::Other("No chain id available".into()))?; - let mut tx = Tx::new(chain_id, tx_args.expiration); - tx.add_code_from_hash(tx_code_hash).add_data(transfer); - - // TODO(namada#1800): validate the tx on the client side - - let epoch = prepare_tx( - context, - &tx_args, - &mut tx, - signing_data.fee_payer.clone(), - None, - ) - .await?; + // if force { + // return Ok(transfer); + //} - Ok((tx, signing_data, epoch)) + Ok(transfer) } /// A json serializable representation of the Ethereum @@ -999,7 +1034,6 @@ mod recommendations { #[cfg(test)] mod test_recommendations { use namada_core::types::address::Address; - use namada_core::types::ethereum_events::EthAddress; use super::*; use crate::io::StdIo; From 601ada822894f0cced699921302be951ffab4f87 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 13:52:14 +0100 Subject: [PATCH 098/161] Add new SDK error types --- sdk/src/error.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/sdk/src/error.rs b/sdk/src/error.rs index b34a7a5562..dba1898615 100644 --- a/sdk/src/error.rs +++ b/sdk/src/error.rs @@ -3,6 
+3,7 @@ use namada_core::proto::Tx; use namada_core::types::address::Address; use namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage; use namada_core::types::storage::Epoch; use prost::EncodeError; @@ -324,6 +325,18 @@ pub enum EthereumBridgeError { /// Invalid Bridge pool nonce error. #[error("The Bridge pool nonce is invalid")] InvalidBpNonce, + /// Invalid fee token error. + #[error("An invalid fee token was provided: {0}")] + InvalidFeeToken(Address), + /// Not whitelisted error. + #[error("ERC20 is not whitelisted: {0}")] + Erc20NotWhitelisted(EthAddress), + /// Exceeded token caps error. + #[error("ERC20 token caps exceeded: {0}")] + Erc20TokenCapsExceeded(EthAddress), + /// Transfer already in pool error. + #[error("An identical transfer is already present in the Bridge pool")] + TransferAlreadyInPool, } /// Checks if the given error is an invalid viewing key From a025cfcd03d073c760b97b27b26f05f53b7180e8 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 14:36:36 +0100 Subject: [PATCH 099/161] Check ERC20 token caps --- sdk/src/queries/shell/eth_bridge.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sdk/src/queries/shell/eth_bridge.rs b/sdk/src/queries/shell/eth_bridge.rs index 6baf649992..4c765e1144 100644 --- a/sdk/src/queries/shell/eth_bridge.rs +++ b/sdk/src/queries/shell/eth_bridge.rs @@ -57,6 +57,15 @@ pub struct Erc20FlowControl { cap: Amount, } +impl Erc20FlowControl { + /// Check if the `transferred_amount` exceeds the token caps of some ERC20 + /// asset. + #[inline] + pub fn exceeds_token_caps(&self, transferred_amount: Amount) -> bool { + self.supply + transferred_amount > self.cap + } +} + /// Request data to pass to `generate_bridge_pool_proof`. 
#[derive(Debug, Clone, Eq, PartialEq, BorshSerialize, BorshDeserialize)] pub struct GenBridgePoolProofReq<'transfers, 'relayer> { From 3b3e6e90fa072f602bec4b4e70c9e4aafef64d42 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 14:39:18 +0100 Subject: [PATCH 100/161] Make ERC20 flow control fields public --- sdk/src/queries/shell/eth_bridge.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/src/queries/shell/eth_bridge.rs b/sdk/src/queries/shell/eth_bridge.rs index 4c765e1144..c9c820ed5a 100644 --- a/sdk/src/queries/shell/eth_bridge.rs +++ b/sdk/src/queries/shell/eth_bridge.rs @@ -50,11 +50,11 @@ use crate::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; )] pub struct Erc20FlowControl { /// Whether the wrapped asset is whitelisted. - whitelisted: bool, + pub whitelisted: bool, /// Total minted supply of some wrapped asset. - supply: Amount, + pub supply: Amount, /// The token cap of some wrapped asset. - cap: Amount, + pub cap: Amount, } impl Erc20FlowControl { From 3525f010a57c07b9f749a976a76f71f06705abcf Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Thu, 28 Sep 2023 13:52:35 +0100 Subject: [PATCH 101/161] Validate Bridge pool client transfers --- sdk/src/eth_bridge/bridge_pool.rs | 188 ++++++++++++++++++++++++++---- 1 file changed, 165 insertions(+), 23 deletions(-) diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index 403af5fe8c..31d996906b 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -8,8 +8,10 @@ use std::sync::Arc; use borsh::BorshSerialize; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; +use futures::future::FutureExt; +use namada_core::ledger::eth_bridge::storage::bridge_pool::get_pending_key; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; -use namada_core::types::address::Address; +use namada_core::types::address::{Address, InternalAddress}; use 
namada_core::types::eth_abi::Encode; use namada_core::types::eth_bridge_pool::{ GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, @@ -17,7 +19,7 @@ use namada_core::types::eth_bridge_pool::{ use namada_core::types::ethereum_events::EthAddress; use namada_core::types::keccak::KeccakHash; use namada_core::types::storage::Epoch; -use namada_core::types::token::{Amount, DenominatedAmount}; +use namada_core::types::token::{balance_key, Amount, DenominatedAmount}; use namada_core::types::voting_power::FractionalVotingPower; use owo_colors::OwoColorize; use serde::Serialize; @@ -25,7 +27,9 @@ use serde::Serialize; use super::{block_on_eth_sync, eth_sync_or_exit, BlockOnEthSync}; use crate::control_flow::install_shutdown_signal; use crate::control_flow::time::{Duration, Instant}; -use crate::error::{EncodingError, Error, EthereumBridgeError, QueryError}; +use crate::error::{ + EncodingError, Error, EthereumBridgeError, QueryError, TxError, +}; use crate::eth_bridge::ethers::abi::AbiDecode; use crate::internal_macros::echo_error; use crate::io::Io; @@ -34,7 +38,7 @@ use crate::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, RPC, }; -use crate::rpc::{query_wasm_code_hash, validate_amount}; +use crate::rpc::{query_storage_value, query_wasm_code_hash, validate_amount}; use crate::signing::aux_signing_data; use crate::tx::prepare_tx; use crate::{ @@ -116,24 +120,34 @@ async fn validate_bridge_pool_tx( fee_payer: Option
, fee_token: Address, ) -> Result { - let DenominatedAmount { amount, .. } = - validate_amount(context, amount, &wrapped_erc20s::token(&asset), force) - .await - .map_err(|e| { - Error::Other(format!("Failed to validate amount. {}", e)) - })?; + let token_addr = wrapped_erc20s::token(&asset); + let validate_token_amount = + validate_amount(context, amount, &token_addr, force).map(|result| { + result.map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool transfer amount: {e}" + )) + }) + }); - let DenominatedAmount { - amount: fee_amount, .. - } = validate_amount(context, fee_amount, &fee_token, force) - .await - .map_err(|e| { - Error::Other(format!( - "Failed to validate Bridge pool fee amount. {}", - e - )) - })?; + let validate_fee_amount = + validate_amount(context, fee_amount, &fee_token, force).map(|result| { + result.map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool fee amount: {e}", + )) + }) + }); + + // validate amounts + let ( + tok_denominated @ DenominatedAmount { amount, .. }, + fee_denominated @ DenominatedAmount { + amount: fee_amount, .. + }, + ) = futures::try_join!(validate_token_amount, validate_fee_amount)?; + // build pending Bridge pool transfer let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let transfer = PendingTransfer { transfer: TransferToEthereum { @@ -154,9 +168,137 @@ async fn validate_bridge_pool_tx( }, }; - // if force { - // return Ok(transfer); - //} + if force { + return Ok(transfer); + } + + //====================================================== + // XXX: the following validations should be kept in sync + // with the validations performed by the Bridge pool VP! 
+ //====================================================== + + // check if an identical transfer is already in the Bridge pool + let transfer_in_pool = RPC + .shell() + .storage_has_key(context.client(), &get_pending_key(&transfer)) + .await + .map_err(|e| Error::Query(QueryError::General(e.to_string())))?; + if transfer_in_pool { + return Err(Error::EthereumBridge( + EthereumBridgeError::TransferAlreadyInPool, + )); + } + + let wnam_addr = RPC + .shell() + .eth_bridge() + .read_native_erc20_contract(context.client()) + .await + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::RetrieveContract( + e.to_string(), + )) + })?; + + // validate gas fee token + match &transfer.gas_fee.token { + Address::Internal(InternalAddress::Nut(_)) => { + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidFeeToken(transfer.gas_fee.token), + )); + } + fee_token if fee_token == &wrapped_erc20s::token(&wnam_addr) => { + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidFeeToken(transfer.gas_fee.token), + )); + } + _ => {} + } + + // validate wnam token caps + whitelist + if transfer.transfer.asset == wnam_addr { + let flow_control = RPC + .shell() + .eth_bridge() + .get_erc20_flow_control(context.client(), &wnam_addr) + .await + .map_err(|e| { + Error::Query(QueryError::General(format!( + "Failed to read wrapped NAM flow control data: {e}" + ))) + })?; + + if !flow_control.whitelisted { + return Err(Error::EthereumBridge( + EthereumBridgeError::Erc20NotWhitelisted(wnam_addr), + )); + } + + if flow_control.exceeds_token_caps(transfer.transfer.amount) { + return Err(Error::EthereumBridge( + EthereumBridgeError::Erc20TokenCapsExceeded(wnam_addr), + )); + } + } + + // validate balances + let maybe_balance_error = if token_addr == transfer.gas_fee.token { + let expected_debit = transfer.transfer.amount + transfer.gas_fee.amount; + let balance: Amount = query_storage_value( + context.client(), + &balance_key(&token_addr, &transfer.transfer.sender), + ) + 
.await?; + + balance + .checked_sub(expected_debit) + .is_none() + .then_some((token_addr, tok_denominated)) + } else { + let check_tokens = async { + let balance: Amount = query_storage_value( + context.client(), + &balance_key(&token_addr, &transfer.transfer.sender), + ) + .await?; + Result::<_, Error>::Ok( + balance + .checked_sub(transfer.transfer.amount) + .is_none() + .then_some((token_addr, tok_denominated)), + ) + }; + let check_fees = async { + let balance: Amount = query_storage_value( + context.client(), + &balance_key( + &transfer.gas_fee.token, + &transfer.transfer.sender, + ), + ) + .await?; + Result::<_, Error>::Ok( + balance + .checked_sub(transfer.gas_fee.amount) + .is_none() + .then_some(( + transfer.gas_fee.token.clone(), + fee_denominated, + )), + ) + }; + + let (err_tokens, err_fees) = + futures::try_join!(check_tokens, check_fees)?; + err_tokens.or(err_fees) + }; + if let Some((token, amount)) = maybe_balance_error { + return Err(Error::Tx(TxError::NegativeBalanceAfterTransfer( + Box::new(transfer.transfer.sender), + amount.to_string(), + Box::new(token), + ))); + } Ok(transfer) } From bbab1c48d3a03fcd0fba27261da11bb1819d7102 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 29 Sep 2023 13:20:06 +0100 Subject: [PATCH 102/161] Changelog for #1957 --- .changelog/unreleased/SDK/1957-bp-client-validation.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/SDK/1957-bp-client-validation.md diff --git a/.changelog/unreleased/SDK/1957-bp-client-validation.md b/.changelog/unreleased/SDK/1957-bp-client-validation.md new file mode 100644 index 0000000000..ad829f2c88 --- /dev/null +++ b/.changelog/unreleased/SDK/1957-bp-client-validation.md @@ -0,0 +1,2 @@ +- Validate Bridge pool transfers before submitting them to the network + ([\#1957](https://github.com/anoma/namada/pull/1957)) \ No newline at end of file From 672e153b42fecd990c0f778eb88da4ab5a970b08 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 13 Oct 2023 17:24:41 +0200 Subject: [PATCH 103/161] changelog: add #1984 --- .changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md diff --git a/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md b/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md new file mode 100644 index 0000000000..f88ad5858f --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md @@ -0,0 +1,4 @@ +- Avoid redundant storage deletions in lazy collections that would incur + extra gas cause and appear in transaction result as changed keys even if not + changed occurred. This may have caused PoS transactions to run out of gas. + ([\#1984](https://github.com/anoma/namada/pull/1984)) \ No newline at end of file From 0bad247ba8adfb505d04e9242cb50f2f2592eb05 Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 13 Oct 2023 17:55:03 +0200 Subject: [PATCH 104/161] update ibc-rs for ibc-rs/#911 --- Cargo.lock | 2 +- core/Cargo.toml | 2 +- wasm/Cargo.lock | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..6960209c71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3195,7 +3195,7 @@ dependencies = [ [[package]] name = "ibc" version = "0.41.0" -source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=38a827d3901e590b2935ee5b6b81b4d67c399560#38a827d3901e590b2935ee5b6b81b4d67c399560" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=206cb5fa74a7ca38038b937d202ae39fbbd63c19#206cb5fa74a7ca38038b937d202ae39fbbd63c19" dependencies = [ "bytes", "cfg-if 1.0.0", diff --git a/core/Cargo.toml b/core/Cargo.toml index ebbb35f383..c0e68bbd3b 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -72,7 +72,7 @@ ferveo = {optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd ferveo-common = {git = 
"https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} # TODO using the same version of tendermint-rs as we do here. -ibc = { git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "38a827d3901e590b2935ee5b6b81b4d67c399560", features = ["serde"], optional = true} +ibc = { git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "206cb5fa74a7ca38038b937d202ae39fbbd63c19", features = ["serde"], optional = true} ibc-proto = {git = "https://github.com/heliaxdev/ibc-proto-rs.git", rev = "31892ee743b2af017d5629b2af419ee20b6100c7", default-features = false, optional = true} ics23.workspace = true impl-num-traits = "0.1.2" diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 5dbe91c1e4..eef344a119 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -2661,7 +2661,7 @@ dependencies = [ [[package]] name = "ibc" version = "0.41.0" -source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=38a827d3901e590b2935ee5b6b81b4d67c399560#38a827d3901e590b2935ee5b6b81b4d67c399560" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=206cb5fa74a7ca38038b937d202ae39fbbd63c19#206cb5fa74a7ca38038b937d202ae39fbbd63c19" dependencies = [ "bytes", "cfg-if 1.0.0", From 6bb2bb06f1b4c50843e2b5cd21eb2e9783911f44 Mon Sep 17 00:00:00 2001 From: yito88 Date: Fri, 13 Oct 2023 18:01:29 +0200 Subject: [PATCH 105/161] add changelog --- .../unreleased/bug-fixes/1989-fix-ibc-client-validation.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md diff --git a/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md b/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md new file mode 100644 index 0000000000..8adbad706a --- /dev/null +++ 
b/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md @@ -0,0 +1,2 @@ +- Update ibc-rs with the fix for ibc-rs/#911 + ([\#1989](https://github.com/anoma/namada/issues/1989)) \ No newline at end of file From 68b5704350d6f1b09b2787499a0e3c4ea2ff5496 Mon Sep 17 00:00:00 2001 From: brentstone Date: Thu, 22 Jun 2023 22:27:21 -0600 Subject: [PATCH 106/161] SQUASHED redelegation --- Cargo.lock | 45 +- Cargo.toml | 5 +- Makefile | 9 +- apps/src/lib/cli.rs | 88 +- apps/src/lib/cli/client.rs | 14 + apps/src/lib/client/rpc.rs | 22 +- apps/src/lib/client/tx.rs | 42 + .../lib/node/ledger/shell/finalize_block.rs | 156 +- apps/src/lib/node/ledger/shell/governance.rs | 2 +- .../lib/node/ledger/shell/prepare_proposal.rs | 5 +- benches/lib.rs | 5 + benches/txs.rs | 52 +- .../storage_api/collections/lazy_map.rs | 236 +- core/src/ledger/storage_api/error.rs | 27 + core/src/ledger/storage_api/token.rs | 2 +- core/src/types/dec.rs | 17 +- core/src/types/token.rs | 32 + core/src/types/transaction/pos.rs | 24 + core/src/types/uint.rs | 35 + ethereum_bridge/src/test_utils.rs | 8 +- proof_of_stake/Cargo.toml | 4 + .../tests/state_machine.txt | 2 - proof_of_stake/src/btree_set.rs | 54 - proof_of_stake/src/epoched.rs | 35 +- proof_of_stake/src/error.rs | 185 + proof_of_stake/src/lib.rs | 3006 ++++++++--- proof_of_stake/src/parameters.rs | 31 + proof_of_stake/src/storage.rs | 73 +- proof_of_stake/src/tests.rs | 3714 ++++++++++++- proof_of_stake/src/tests/state_machine.rs | 3249 +++++++++--- proof_of_stake/src/tests/state_machine_v2.rs | 4584 +++++++++++++++++ proof_of_stake/src/tests/utils.rs | 81 + proof_of_stake/src/types.rs | 117 +- shared/src/ledger/queries/vp/pos.rs | 38 +- shared/src/ledger/queries/vp/token.rs | 2 +- shared/src/sdk/args.rs | 17 + shared/src/sdk/error.rs | 28 + shared/src/sdk/rpc.rs | 27 +- shared/src/sdk/signing.rs | 2 +- shared/src/sdk/tx.rs | 256 +- tests/src/e2e/ledger_tests.rs | 123 +- tests/src/native_vp/pos.rs | 9 +- tx_prelude/src/proof_of_stake.rs | 
39 +- wasm/wasm_source/Cargo.toml | 1 + wasm/wasm_source/Makefile | 1 + wasm/wasm_source/src/lib.rs | 2 + wasm/wasm_source/src/tx_bond.rs | 49 +- .../src/tx_change_validator_commission.rs | 2 +- wasm/wasm_source/src/tx_redelegate.rs | 409 ++ wasm/wasm_source/src/tx_unbond.rs | 184 +- wasm/wasm_source/src/tx_withdraw.rs | 11 +- 51 files changed, 15099 insertions(+), 2062 deletions(-) delete mode 100644 proof_of_stake/src/btree_set.rs create mode 100644 proof_of_stake/src/error.rs create mode 100644 proof_of_stake/src/tests/state_machine_v2.rs create mode 100644 proof_of_stake/src/tests/utils.rs create mode 100644 wasm/wasm_source/src/tx_redelegate.rs diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..98d1c28e6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,15 +95,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" version = "0.3.2" @@ -1570,16 +1561,6 @@ dependencies = [ "sct 0.6.1", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.9.2" @@ -4291,18 +4272,21 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.0" dependencies = [ + "assert_matches", "borsh 0.9.4", "data-encoding", "derivative", "itertools", "namada_core", "once_cell", + "pretty_assertions", "proptest", "proptest-state-machine", "test-log", "thiserror", "tracing 0.1.37", "tracing-subscriber 0.3.17", + "yansi", ] [[package]] @@ -4790,15 +4774,6 @@ dependencies = [ "zeroize", ] 
-[[package]] -name = "output_vt100" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" -dependencies = [ - "winapi", -] - [[package]] name = "overload" version = "0.1.1" @@ -5138,14 +5113,12 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "0.7.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ - "ansi_term", - "ctor", "diff", - "output_vt100", + "yansi", ] [[package]] @@ -8290,6 +8263,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zcash_encoding" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 9c731f0fdf..56550fdbf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,7 +100,7 @@ num-traits = "0.2.14" once_cell = "1.8.0" orion = "0.16.0" paste = "1.0.9" -pretty_assertions = "0.7.2" +pretty_assertions = "1.4.0" primitive-types = "0.12.1" proptest = "1.2.0" proptest-state-machine = "0.1.0" @@ -145,7 +145,8 @@ tracing-log = "0.1.2" tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} wasmparser = "0.107.0" winapi = "0.3.9" -zeroize = {version = "1.5.5", features = ["zeroize_derive"]} +yansi = "0.5.1" +zeroize = { version = "1.5.5", features = ["zeroize_derive"] } [patch.crates-io] # TODO temp patch for , and more tba. 
diff --git a/Makefile b/Makefile index d592fbcb09..74100d3e1b 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,9 @@ NAMADA_E2E_DEBUG ?= true RUST_BACKTRACE ?= 1 NAMADA_MASP_TEST_SEED ?= 0 PROPTEST_CASES ?= 100 +# Disable shrinking in `make test-pos-sm` for CI runs. If the test fail in CI, +# we only want to get the seed. +PROPTEST_MAX_SHRINK_ITERS ?= 0 cargo := $(env) cargo rustup := $(env) rustup @@ -211,11 +214,13 @@ test-debug: test-benches: $(cargo) +$(nightly) test --package namada_benchmarks --benches -# Run PoS state machine tests +# Run PoS state machine tests with shrinking disabled by default (can be +# overriden with `PROPTEST_MAX_SHRINK_ITERS`) test-pos-sm: cd proof_of_stake && \ - RUST_BACKTRACE=1 \ + RUST_BACKTRACE=1 \ PROPTEST_CASES=$(PROPTEST_CASES) \ + PROPTEST_MAX_SHRINK_ITERS=$(PROPTEST_MAX_SHRINK_ITERS) \ RUSTFLAGS='-C debuginfo=2 -C debug-assertions=true -C overflow-checks=true' \ cargo test pos_state_machine_test --release diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 135ff1e3c5..039e15747c 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -228,6 +228,7 @@ pub mod cmds { .subcommand(Bond::def().display_order(2)) .subcommand(Unbond::def().display_order(2)) .subcommand(Withdraw::def().display_order(2)) + .subcommand(Redelegate::def().display_order(2)) .subcommand(TxCommissionRateChange::def().display_order(2)) // Ethereum bridge transactions .subcommand(AddToEthBridgePool::def().display_order(3)) @@ -285,6 +286,7 @@ pub mod cmds { let bond = Self::parse_with_ctx(matches, Bond); let unbond = Self::parse_with_ctx(matches, Unbond); let withdraw = Self::parse_with_ctx(matches, Withdraw); + let redelegate = Self::parse_with_ctx(matches, Redelegate); let query_epoch = Self::parse_with_ctx(matches, QueryEpoch); let query_account = Self::parse_with_ctx(matches, QueryAccount); let query_transfers = Self::parse_with_ctx(matches, QueryTransfers); @@ -328,6 +330,7 @@ pub mod cmds { .or(bond) .or(unbond) .or(withdraw) + 
.or(redelegate) .or(add_to_eth_bridge_pool) .or(tx_update_steward_commission) .or(tx_resign_steward) @@ -402,6 +405,7 @@ pub mod cmds { Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), + Redelegate(Redelegate), AddToEthBridgePool(AddToEthBridgePool), TxUpdateStewardCommission(TxUpdateStewardCommission), TxResignSteward(TxResignSteward), @@ -1425,6 +1429,27 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct Redelegate(pub args::Redelegate); + + impl SubCmd for Redelegate { + const CMD: &'static str = "redelegate"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Redelegate(args::Redelegate::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Redelegate bonded tokens from one validator to another.", + ) + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct QueryEpoch(pub args::Query); @@ -2551,6 +2576,7 @@ pub mod args { pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; + pub const TX_REDELEGATE_WASM: &str = "tx_redelegate.wasm"; pub const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; pub const TX_UPDATE_STEWARD_COMMISSION: &str = "tx_update_steward_commission.wasm"; @@ -2581,7 +2607,7 @@ pub mod args { arg_default( "pool-gas-amount", DefaultFn(|| token::DenominatedAmount { - amount: token::Amount::default(), + amount: token::Amount::zero(), denom: NATIVE_MAX_DECIMAL_PLACES.into(), }), ); @@ -2614,6 +2640,8 @@ pub mod args { pub const DATA_PATH: Arg = arg("data-path"); pub const DECRYPT: ArgFlag = flag("decrypt"); pub const DISPOSABLE_SIGNING_KEY: ArgFlag = flag("disposable-gas-payer"); + pub const DESTINATION_VALIDATOR: Arg = + arg("destination-validator"); pub const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); pub const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); pub const DRY_RUN_TX: ArgFlag = flag("dry-run"); @@ -2718,6 
+2746,7 @@ pub mod args { pub const SOURCE: Arg = arg("source"); pub const SOURCE_OPT: ArgOpt = SOURCE.opt(); pub const STEWARD: Arg = arg("steward"); + pub const SOURCE_VALIDATOR: Arg = arg("source-validator"); pub const STORAGE_KEY: Arg = arg("storage-key"); pub const SUSPEND_ACTION: ArgFlag = flag("suspend"); pub const TIMEOUT_HEIGHT: ArgOpt = arg_opt("timeout-height"); @@ -4018,6 +4047,63 @@ pub mod args { } } + impl CliToSdk> for Redelegate { + fn to_sdk(self, ctx: &mut Context) -> Redelegate { + Redelegate:: { + tx: self.tx.to_sdk(ctx), + src_validator: ctx.get(&self.src_validator), + dest_validator: ctx.get(&self.dest_validator), + owner: ctx.get(&self.owner), + amount: self.amount, + tx_code_path: self.tx_code_path.to_path_buf(), + } + } + } + + impl Args for Redelegate { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let src_validator = SOURCE_VALIDATOR.parse(matches); + let dest_validator = DESTINATION_VALIDATOR.parse(matches); + let owner = OWNER.parse(matches); + let amount = AMOUNT.parse(matches); + let amount = amount + .canonical() + .increase_precision(NATIVE_MAX_DECIMAL_PLACES.into()) + .unwrap_or_else(|e| { + println!("Could not parse bond amount: {:?}", e); + safe_exit(1); + }) + .amount; + let tx_code_path = PathBuf::from(TX_REDELEGATE_WASM); + Self { + tx, + src_validator, + dest_validator, + owner, + amount, + tx_code_path, + } + } + + fn def(app: App) -> App { + app.add_args::>() + .arg( + SOURCE_VALIDATOR + .def() + .help("Source validator address for the redelegation."), + ) + .arg(DESTINATION_VALIDATOR.def().help( + "Destination validator address for the redelegation.", + )) + .arg(OWNER.def().help( + "Delegator (owner) address of the bonds that are being \ + redelegated.", + )) + .arg(AMOUNT.def().help("Amount of tokens to redelegate.")) + } + } + impl CliToSdk> for InitProposal { fn to_sdk(self, ctx: &mut Context) -> InitProposal { InitProposal:: { diff --git a/apps/src/lib/cli/client.rs 
b/apps/src/lib/cli/client.rs index 1a7d9f534a..1a60172770 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -223,6 +223,20 @@ impl CliApi { tx::submit_withdraw::<_, IO>(&client, ctx, args) .await?; } + Sub::Redelegate(Redelegate(mut args)) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address( + &mut args.tx.ledger_address, + ) + }); + client + .wait_until_node_is_synced::() + .await + .proceed_or_else(error)?; + let args = args.to_sdk(&mut ctx); + tx::submit_redelegate::<_, IO>(&client, ctx, args) + .await?; + } Sub::TxCommissionRateChange(TxCommissionRateChange( mut args, )) => { diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..3885c6d1c9 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -30,7 +30,8 @@ use namada::core::ledger::pgf::parameters::PgfParameters; use namada::core::ledger::pgf::storage::steward::StewardDetail; use namada::ledger::events::Event; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::{CommissionPair, PosParams, Slash}; +use namada::ledger::pos::types::{CommissionPair, Slash}; +use namada::ledger::pos::PosParams; use namada::ledger::queries::RPC; use namada::ledger::storage::ConversionState; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; @@ -1454,7 +1455,7 @@ pub async fn query_and_print_unbonds< let unbonds = query_unbond_with_slashing(client, source, validator).await; let current_epoch = query_epoch(client).await.unwrap(); - let mut total_withdrawable = token::Amount::default(); + let mut total_withdrawable = token::Amount::zero(); let mut not_yet_withdrawable = HashMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { if withdraw_epoch <= current_epoch { @@ -1465,7 +1466,7 @@ pub async fn query_and_print_unbonds< *withdrawable_amount += amount; } } - if total_withdrawable != token::Amount::default() { + if 
!total_withdrawable.is_zero() { display_line!( IO, "Total withdrawable now: {}.", @@ -1538,7 +1539,7 @@ pub async fn query_bonds( bond.amount.to_string_native() )?; } - if details.bonds_total != token::Amount::zero() { + if !details.bonds_total.is_zero() { display_line!( IO, &mut w; @@ -2339,13 +2340,12 @@ pub async fn get_bond_amount_at( validator: &Address, epoch: Epoch, ) -> Option { - let (_total, total_active) = - unwrap_client_response::( - RPC.vp() - .pos() - .bond_with_slashing(client, delegator, validator, &Some(epoch)) - .await, - ); + let total_active = unwrap_client_response::( + RPC.vp() + .pos() + .bond_with_slashing(client, delegator, validator, &Some(epoch)) + .await, + ); Some(total_active) } diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..9633638700 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1395,6 +1395,48 @@ where Ok(()) } +pub async fn submit_redelegate( + client: &C, + mut ctx: Context, + args: args::Redelegate, +) -> Result<(), error::Error> +where + C: namada::ledger::queries::Client + Sync, + C::Error: std::fmt::Display, +{ + let default_address = args.owner.clone(); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + &mut ctx.wallet, + &args.tx, + Some(default_address), + default_signer, + ) + .await?; + + let mut tx = tx::build_redelegation::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + signing_data.fee_payer.clone(), + ) + .await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; + + if args.tx.dump_tx { + tx::dump_tx::(&args.tx, tx); + } else { + signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; + } + + Ok(()) +} + pub async fn submit_validator_commission_change( client: &C, mut ctx: Context, diff --git 
a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..00c3a077af 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -723,14 +723,12 @@ where let reward = fractional_claim * inflation; // Get validator data at the last epoch - let stake = read_validator_stake( + let stake = Dec::from(read_validator_stake( &self.wl_storage, ¶ms, &address, last_epoch, - )? - .map(Dec::from) - .unwrap_or_default(); + )?); let last_rewards_product = validator_rewards_products_handle(&address) .get(&self.wl_storage, &last_epoch)? @@ -1057,7 +1055,6 @@ mod test_finalize_block { use namada::ledger::pos::PosQueries; use namada::ledger::storage_api; use namada::ledger::storage_api::StorageWrite; - use namada::proof_of_stake::btree_set::BTreeSetShims; use namada::proof_of_stake::storage::{ is_validator_slashes_key, slashes_prefix, }; @@ -1912,7 +1909,6 @@ mod test_finalize_block { validator, Epoch::default(), ) - .unwrap() .unwrap(); let votes = vec![VoteInfo { @@ -1995,10 +1991,10 @@ mod test_finalize_block { let params = read_pos_params(&shell.wl_storage).unwrap(); - let val1 = validator_set.pop_first_shim().unwrap(); - let val2 = validator_set.pop_first_shim().unwrap(); - let val3 = validator_set.pop_first_shim().unwrap(); - let val4 = validator_set.pop_first_shim().unwrap(); + let val1 = validator_set.pop_first().unwrap(); + let val2 = validator_set.pop_first().unwrap(); + let val3 = validator_set.pop_first().unwrap(); + let val4 = validator_set.pop_first().unwrap(); let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) @@ -2831,15 +2827,13 @@ mod test_finalize_block { ¶ms, &val1.address, shell.wl_storage.storage.block.epoch, - )? - .unwrap(); + )?; let stake2 = read_validator_stake( &shell.wl_storage, ¶ms, &val2.address, shell.wl_storage.storage.block.epoch, - )? 
- .unwrap(); + )?; let total_stake = read_total_stake( &shell.wl_storage, ¶ms, @@ -2930,21 +2924,35 @@ mod test_finalize_block { ¶ms, &val1.address, pipeline_epoch, - )? - .unwrap(); + )?; let stake2 = read_validator_stake( &shell.wl_storage, ¶ms, &val2.address, pipeline_epoch, - )? - .unwrap(); + )?; let total_stake = read_total_stake(&shell.wl_storage, ¶ms, pipeline_epoch)?; - let expected_slashed = cubic_rate * initial_stake; - assert_eq!(stake1, initial_stake - expected_slashed); - assert_eq!(stake2, initial_stake - expected_slashed); + let expected_slashed = initial_stake.mul_ceil(cubic_rate); + + println!( + "Initial stake = {}\nCubic rate = {}\nExpected slashed = {}\n", + initial_stake.to_string_native(), + cubic_rate, + expected_slashed.to_string_native() + ); + + assert!( + (stake1.change() - (initial_stake - expected_slashed).change()) + .abs() + <= 1.into() + ); + assert!( + (stake2.change() - (initial_stake - expected_slashed).change()) + .abs() + <= 1.into() + ); assert_eq!(total_stake, total_initial_stake - 2u64 * expected_slashed); // Unjail one of the validators @@ -3019,7 +3027,6 @@ mod test_finalize_block { /// 4) Self-unbond 15_000 /// 5) Delegate 8_144 to validator /// 6) Discover misbehavior in epoch 3 - /// 7) Discover misbehavior in epoch 3 /// 7) Discover misbehavior in epoch 4 fn test_multiple_misbehaviors_by_num_vals( num_validators: u64, @@ -3046,7 +3053,7 @@ mod test_finalize_block { .read(&slash_balance_key) .expect("must be able to read") .unwrap_or_default(); - debug_assert_eq!(slash_pool_balance_init, token::Amount::default()); + debug_assert_eq!(slash_pool_balance_init, token::Amount::zero()); let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( @@ -3108,6 +3115,7 @@ mod test_finalize_block { &val1.address, self_unbond_1_amount, current_epoch, + false, ) .unwrap(); @@ -3117,8 +3125,7 @@ mod test_finalize_block { &val1.address, current_epoch + params.pipeline_len, ) - .unwrap() - .unwrap_or_default(); + 
.unwrap(); let total_stake = namada_proof_of_stake::read_total_stake( &shell.wl_storage, @@ -3151,6 +3158,7 @@ mod test_finalize_block { &val1.address, del_unbond_1_amount, current_epoch, + false, ) .unwrap(); @@ -3160,8 +3168,7 @@ mod test_finalize_block { &val1.address, current_epoch + params.pipeline_len, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let total_stake = namada_proof_of_stake::read_total_stake( &shell.wl_storage, ¶ms, @@ -3216,6 +3223,7 @@ mod test_finalize_block { &val1.address, self_unbond_2_amount, current_epoch, + false, ) .unwrap(); @@ -3392,8 +3400,7 @@ mod test_finalize_block { &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); assert_eq!( pre_stake_10, initial_stake + del_1_amount @@ -3426,16 +3433,14 @@ mod test_finalize_block { &val1.address, Epoch(3), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let val_stake_4 = namada_proof_of_stake::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(4), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let tot_stake_3 = namada_proof_of_stake::read_total_stake( &shell.wl_storage, @@ -3477,31 +3482,33 @@ mod test_finalize_block { // Check the amount of stake deducted from the futuremost epoch while // processing the slashes - let post_stake_10 = namada_proof_of_stake::read_validator_stake( + let post_stake_10 = read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // The amount unbonded after the infraction that affected the deltas // before processing is `del_unbond_1_amount + self_bond_1_amount - // self_unbond_2_amount` (since this self-bond was enacted then unbonded // all after the infraction). 
Thus, the additional deltas to be // deducted is the (infraction stake - this) * rate let slash_rate_3 = std::cmp::min(Dec::one(), Dec::two() * cubic_rate); - let exp_slashed_during_processing_9 = slash_rate_3 - * (initial_stake + del_1_amount - - self_unbond_1_amount - - del_unbond_1_amount - + self_bond_1_amount - - self_unbond_2_amount); + let exp_slashed_during_processing_9 = (initial_stake + del_1_amount + - self_unbond_1_amount + - del_unbond_1_amount + + self_bond_1_amount + - self_unbond_2_amount) + .mul_ceil(slash_rate_3); assert!( ((pre_stake_10 - post_stake_10).change() - exp_slashed_during_processing_9.change()) .abs() - < Uint::from(1000) + < Uint::from(1000), + "Expected {}, got {} (with less than 1000 err)", + exp_slashed_during_processing_9.to_string_native(), + (pre_stake_10 - post_stake_10).to_string_native(), ); // Check that we can compute the stake at the pipeline epoch @@ -3518,7 +3525,11 @@ mod test_finalize_block { assert!( exp_pipeline_stake.abs_diff(&Dec::from(post_stake_10)) - <= Dec::new(1, NATIVE_MAX_DECIMAL_PLACES).unwrap() + <= Dec::new(2, NATIVE_MAX_DECIMAL_PLACES).unwrap(), + "Expected {}, got {} (with less than 2 err), diff {}", + exp_pipeline_stake, + post_stake_10.to_string_native(), + exp_pipeline_stake.abs_diff(&Dec::from(post_stake_10)), ); // Check the balance of the Slash Pool @@ -3535,15 +3546,6 @@ mod test_finalize_block { // ); // assert_eq!(slash_pool_balance, exp_slashed_3); - let _pre_stake_11 = namada_proof_of_stake::read_validator_stake( - &shell.wl_storage, - ¶ms, - &val1.address, - Epoch(10), - ) - .unwrap() - .unwrap_or_default(); - // Advance to epoch 10, where the infraction committed in epoch 4 will // be processed let votes = get_default_true_votes( @@ -3562,7 +3564,7 @@ mod test_finalize_block { // .unwrap_or_default(); // let exp_slashed_4 = if dec!(2) * cubic_rate >= Decimal::ONE { - // token::Amount::default() + // token::Amount::zero() // } else if dec!(3) * cubic_rate >= Decimal::ONE { // 
decimal_mult_amount( // Decimal::ONE - dec!(2) * cubic_rate, @@ -3587,19 +3589,27 @@ mod test_finalize_block { ¶ms, &val1.address, current_epoch + params.pipeline_len, - )? - .unwrap_or_default(); + )?; - let post_stake_11 = namada_proof_of_stake::read_validator_stake( + let post_stake_10 = read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); + + // Stake at current epoch should be equal to stake at pipeline + assert_eq!( + post_stake_10, + val_stake, + "Stake at pipeline in epoch {} ({}) expected to be equal to stake \ + in epoch 10 ({}).", + current_epoch + params.pipeline_len, + val_stake.to_string_native(), + post_stake_10.to_string_native() + ); - assert_eq!(post_stake_11, val_stake); // dbg!(&val_stake); // dbg!(pre_stake_10 - post_stake_10); @@ -3743,12 +3753,16 @@ mod test_finalize_block { self_details.unbonds[1].amount, self_unbond_2_amount - self_bond_1_amount ); - assert_eq!( - self_details.unbonds[1].slashed_amount, - Some( - std::cmp::min(Dec::one(), Dec::new(3, 0).unwrap() * cubic_rate) - * (self_unbond_2_amount - self_bond_1_amount) - ) + let rate = + std::cmp::min(Dec::one(), Dec::new(3, 0).unwrap() * cubic_rate); + assert!( + // at most off by 1 + (self_details.unbonds[1].slashed_amount.unwrap().change() + - (self_unbond_2_amount - self_bond_1_amount) + .mul_ceil(rate) + .change()) + .abs() + <= Uint::from(1) ); assert_eq!(self_details.unbonds[2].amount, self_bond_1_amount); assert_eq!(self_details.unbonds[2].slashed_amount, None); @@ -3766,10 +3780,12 @@ mod test_finalize_block { .unwrap(); let exp_del_withdraw_slashed_amount = - slash_rate_3 * del_unbond_1_amount; - assert_eq!( - del_withdraw, - del_unbond_1_amount - exp_del_withdraw_slashed_amount + del_unbond_1_amount.mul_ceil(slash_rate_3); + assert!( + (del_withdraw + - (del_unbond_1_amount - exp_del_withdraw_slashed_amount)) + .raw_amount() + <= Uint::one() ); // TODO: finish once implemented diff --git 
a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index cc79c9a9f0..1bcad1daaa 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -233,7 +233,7 @@ where source: delegator.clone(), validator: validator.clone(), }; - let (_, delegator_stake) = + let delegator_stake = bond_amount(storage, &bond_id, epoch).unwrap_or_default(); delegators_vote.insert(delegator.clone(), vote_data.into()); diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..96a10c087b 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -500,7 +500,6 @@ mod test_prepare_proposal { use namada::ledger::gas::Gas; use namada::ledger::pos::PosQueries; use namada::ledger::replay_protection; - use namada::proof_of_stake::btree_set::BTreeSetShims; use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::{ consensus_validator_set_handle, @@ -916,8 +915,8 @@ mod test_prepare_proposal { .unwrap() .into_iter() .collect(); - let val1 = consensus_set.pop_first_shim().unwrap(); - let val2 = consensus_set.pop_first_shim().unwrap(); + let val1 = consensus_set.pop_first().unwrap(); + let val2 = consensus_set.pop_first().unwrap(); let pkh1 = get_pkh_from_address( &shell.wl_storage, ¶ms, diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..53a5c15e60 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -111,12 +111,17 @@ pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; pub const TX_UPDATE_ACCOUNT_WASM: &str = "tx_update_account.wasm"; pub const TX_VOTE_PROPOSAL_WASM: &str = "tx_vote_proposal.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; +pub const TX_REDELEGATE_WASM: &str = "tx_redelegate.wasm"; pub const TX_INIT_PROPOSAL_WASM: &str = "tx_init_proposal.wasm"; pub const TX_REVEAL_PK_WASM: &str = "tx_reveal_pk.wasm"; pub 
const TX_CHANGE_VALIDATOR_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; pub const TX_IBC_WASM: &str = "tx_ibc.wasm"; pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; +pub const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; +pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; + pub const VP_VALIDATOR_WASM: &str = "vp_validator.wasm"; pub const ALBERT_PAYMENT_ADDRESS: &str = "albert_payment"; diff --git a/benches/txs.rs b/benches/txs.rs index a1373c7931..d5d60f8eae 100644 --- a/benches/txs.rs +++ b/benches/txs.rs @@ -20,24 +20,24 @@ use namada::types::storage::Key; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; -use namada::types::transaction::pos::{Bond, CommissionChange, Withdraw}; +use namada::types::transaction::pos::{ + Bond, CommissionChange, Redelegation, Withdraw, +}; use namada::types::transaction::EllipticCurve; use namada_apps::wallet::defaults; use namada_benches::{ generate_ibc_transfer_tx, generate_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, - TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_INIT_PROPOSAL_WASM, + TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_INIT_ACCOUNT_WASM, + TX_INIT_PROPOSAL_WASM, TX_INIT_VALIDATOR_WASM, TX_REDELEGATE_WASM, TX_REVEAL_PK_WASM, TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, - TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, VP_VALIDATOR_WASM, + TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, TX_WITHDRAW_WASM, + VP_VALIDATOR_WASM, }; use rand::rngs::StdRng; use rand::SeedableRng; use sha2::Digest; -const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; -const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; -const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; - // TODO: need to benchmark tx_bridge_pool.wasm fn transfer(c: &mut Criterion) { let mut group = c.benchmark_group("transfer"); @@ 
-286,6 +286,43 @@ fn withdraw(c: &mut Criterion) { group.finish(); } +fn redelegate(c: &mut Criterion) { + let mut group = c.benchmark_group("redelegate"); + + let redelegation = |dest_validator| { + generate_tx( + TX_REDELEGATE_WASM, + Redelegation { + src_validator: defaults::validator_address(), + dest_validator, + owner: defaults::albert_address(), + amount: Amount::from(1), + }, + None, + None, + Some(&defaults::albert_keypair()), + ) + }; + + group.bench_function("redelegate", |b| { + b.iter_batched_ref( + || { + let shell = BenchShell::default(); + // Find the other genesis validator + let current_epoch = shell.wl_storage.get_block_epoch().unwrap(); + let validators = namada::proof_of_stake::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); + let validator_2 = validators.into_iter().find(|addr| addr != &defaults::validator_address()).expect("There must be another validator to redelegate to"); + // Prepare the redelegation tx + (shell, redelegation(validator_2)) + }, + |(shell, tx)| shell.execute_tx(tx), + criterion::BatchSize::LargeInput, + ) + }); + + group.finish(); +} + fn reveal_pk(c: &mut Criterion) { let mut csprng = rand::rngs::OsRng {}; let new_implicit_account: common::SecretKey = @@ -687,6 +724,7 @@ criterion_group!( bond, unbond, withdraw, + redelegate, reveal_pk, update_vp, init_account, diff --git a/core/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs index 4f9aeb426d..8d30e2052c 100644 --- a/core/src/ledger/storage_api/collections/lazy_map.rs +++ b/core/src/ledger/storage_api/collections/lazy_map.rs @@ -1,6 +1,6 @@ //! Lazy map. 
-use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; @@ -101,6 +101,107 @@ pub enum ValidationError { InvalidNestedSubKey(storage::Key), } +// pub trait EagerMapFromIter { +// fn from_iter(iter: I) -> Self +// where +// I: IntoIterator; +// } + +// impl EagerMapFromIter for HashMap { +// fn from_iter(iter: I) -> Self +// where +// I: IntoIterator, +// { +// iter.into_iter().collect() +// } +// } + +// impl EagerMapFromIter for BTreeMap { +// fn from_iter(iter: I) -> Self +// where +// K: Eq + Hash + Ord, +// I: IntoIterator, +// { +// iter.into_iter().collect() +// } +// } + +/// Trait used to facilitate collection of lazy maps into eager maps +pub trait Collectable { + /// The type of the value of the lazy map + type Collected; + + /// Collect the lazy map into an eager map + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result; +} + +impl Collectable for LazyMap +where + K: Hash + Eq + Clone + Debug + storage::KeySeg + Ord, + V: Collectable + LazyCollection + Debug, +{ + type Collected = BTreeMap; + + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result + where + S: StorageRead, + { + let mut map = BTreeMap::::new(); + for res in self.iter(storage)? { + let ( + NestedSubKey::Data { + key, + nested_sub_key: _, + }, + _, + ) = res?; + let next_layer = self.at(&key).collect_map(storage)?; + map.insert(key, next_layer); + } + Ok(map) + } +} + +impl Collectable for LazyMap +where + K: Hash + Eq + Clone + Debug + storage::KeySeg + Ord, + V: BorshSerialize + BorshDeserialize + Clone + Debug + 'static, +{ + type Collected = BTreeMap; + + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result + where + S: StorageRead, + { + let mut map = BTreeMap::::new(); + for res in self.iter(storage)? 
{ + let (key, value) = res?; + map.insert(key, value); + } + Ok(map) + } +} + +// impl Collectable for V { +// type Collected = Self; + +// fn collect_map( +// &self, +// _storage: &S, +// ) -> storage_api::Result { +// Ok(self.clone()) +// } +// } + /// [`LazyMap`] validation result pub type ValidationResult = std::result::Result; @@ -359,14 +460,6 @@ impl LazyMap where K: storage::KeySeg, { - /// Returns whether the set contains a value. - pub fn contains(&self, storage: &S, key: &K) -> Result - where - S: StorageRead, - { - storage.has_key(&self.get_data_key(key)) - } - /// Get the prefix of set's elements storage fn get_data_prefix(&self) -> storage::Key { self.key.push(&DATA_SUBKEY.to_owned()).unwrap() @@ -392,6 +485,16 @@ where V::open(self.get_data_key(key)) } + /// Returns whether the nested map contains a certain key with data inside. + pub fn contains(&self, storage: &S, key: &K) -> Result + where + S: StorageRead, + { + let prefix = self.get_data_key(key); + let mut iter = storage_api::iter_prefix_bytes(storage, &prefix)?; + Ok(iter.next().is_some()) + } + /// Remove all map entries at a given key prefix pub fn remove_all(&self, storage: &mut S, key: &K) -> Result where @@ -505,6 +608,28 @@ where Self::read_key_val(storage, &data_key) } + /// Update a value at the given key with the given function. If no existing + /// value exists, the closure's argument will be `None`. + pub fn update(&self, storage: &mut S, key: K, f: F) -> Result<()> + where + S: StorageWrite + StorageRead, + F: FnOnce(Option) -> V, + { + let data_key = self.get_data_key(&key); + let current = Self::read_key_val(storage, &data_key)?; + let new = f(current); + Self::write_key_val(storage, &data_key, new)?; + Ok(()) + } + + /// Returns whether the map contains a key with a value. + pub fn contains(&self, storage: &S, key: &K) -> Result + where + S: StorageRead, + { + storage.has_key(&self.get_data_key(key)) + } + /// Returns whether the map contains no elements. 
pub fn is_empty(&self, storage: &S) -> Result where @@ -553,6 +678,19 @@ where })) } + // /// Collect the lazy map into an eager map + // pub fn collect(&self, storage: &S) -> Result + // where + // S: StorageRead, + // M: EagerMapFromIter, + // K: Eq + Hash + Ord, + // { + // let it = self + // .iter(storage)? + // .map(|res| res.expect("Failed to unwrap a lazy map element")); + // Ok(M::from_iter(it)) + // } + /// Reads a value from storage fn read_key_val( storage: &S, @@ -619,6 +757,14 @@ mod test { assert_eq!(lazy_map.get(&storage, &key)?.unwrap(), val); assert_eq!(lazy_map.get(&storage, &key2)?.unwrap(), val2); + let eager_map: BTreeMap<_, _> = lazy_map.collect_map(&storage)?; + assert_eq!( + eager_map, + vec![(123, "Test".to_string()), (456, "Test2".to_string())] + .into_iter() + .collect::>() + ); + // Remove the values and check the map contents let removed = lazy_map.remove(&mut storage, &key)?.unwrap(); assert_eq!(removed, val); @@ -650,6 +796,20 @@ mod test { Some(SubKey::Data(key2)) ); + // Try to update a key that doesn't yet exist. + let updated_val = "updated"; + lazy_map.update(&mut storage, key, |current| { + assert!(current.is_none()); + updated_val.to_string() + })?; + // Try to update a key that exists. 
+ let updated_val_2 = "updated again"; + lazy_map.update(&mut storage, key, |current| { + assert_eq!(¤t.unwrap_or_default(), updated_val); + updated_val_2.to_string() + })?; + assert_eq!(&lazy_map.get(&storage, &key)?.unwrap(), updated_val_2); + Ok(()) } @@ -780,6 +940,7 @@ mod test { nested_map.at(&0).get(&storage, &"string2".to_string())?, None ); + assert!(nested_map.contains(&storage, &0)?); // Insert more values nested_map @@ -789,6 +950,9 @@ mod test { .at(&0) .insert(&mut storage, "string2".to_string(), 300)?; + assert!(nested_map.contains(&storage, &0)?); + assert!(nested_map.contains(&storage, &1)?); + let mut it = nested_map.iter(&storage)?; let ( NestedSubKey::Data { @@ -852,6 +1016,8 @@ mod test { assert_eq!(nested_map.at(&0).len(&storage)?, 0_u64); assert_eq!(nested_map.at(&1).len(&storage)?, 1_u64); assert_eq!(nested_map.iter(&storage)?.count(), 1); + assert!(!nested_map.contains(&storage, &0)?); + assert!(nested_map.contains(&storage, &1)?); // Start removing elements let rem = nested_map @@ -899,4 +1065,56 @@ mod test { assert!(!nested_map.contains(&storage, &1).unwrap()); assert!(nested_map.is_empty(&storage).unwrap()); } + + #[test] + fn test_lazy_map_collection() { + let mut storage = TestWlStorage::default(); + let key_s = storage::Key::parse("testing_simple").unwrap(); + let key_n = storage::Key::parse("testing_nested").unwrap(); + + let simple = LazyMap::::open(key_s); + simple + .insert(&mut storage, "bartle".to_string(), 5) + .unwrap(); + simple.insert(&mut storage, "doo".to_string(), 4).unwrap(); + + let nested_map = NestedMap::>::open(key_n); + nested_map + .at(&0) + .insert(&mut storage, "dingus".to_string(), 5) + .unwrap(); + nested_map + .at(&0) + .insert(&mut storage, "zingus".to_string(), 3) + .unwrap(); + nested_map + .at(&1) + .insert(&mut storage, "dingus".to_string(), 4) + .unwrap(); + + let exp_simple = + vec![("bartle".to_string(), 5), ("doo".to_string(), 4)] + .into_iter() + .collect::>(); + let mut exp_nested: BTreeMap> = + 
BTreeMap::new(); + exp_nested + .entry(0) + .or_default() + .insert("dingus".to_string(), 5); + exp_nested + .entry(0) + .or_default() + .insert("zingus".to_string(), 3); + exp_nested + .entry(1) + .or_default() + .insert("dingus".to_string(), 4); + + let simple_eager = simple.collect_map(&storage).unwrap(); + let nested_eager = nested_map.collect_map(&storage).unwrap(); + + assert_eq!(exp_simple, simple_eager); + assert_eq!(exp_nested, nested_eager); + } } diff --git a/core/src/ledger/storage_api/error.rs b/core/src/ledger/storage_api/error.rs index f99539bc87..5644bc0a1a 100644 --- a/core/src/ledger/storage_api/error.rs +++ b/core/src/ledger/storage_api/error.rs @@ -63,6 +63,33 @@ impl Error { { Self::CustomWithMessage(msg, CustomError(error.into())) } + + /// Attempt to downgrade the inner error to `E` if any. + /// + /// If this [`enum@Error`] was constructed via [`new`] or [`wrap`] then this + /// function will attempt to perform downgrade on it, otherwise it will + /// return [`Err`]. + /// + /// [`new`]: Error::new + /// [`wrap`]: Error::wrap + /// + /// To match on the inner error type when the downcast is successful, you'll + /// typically want to [`std::ops::Deref::deref`] it out of the [`Box`]. 
+ pub fn downcast(self) -> std::result::Result, Self> + where + E: std::error::Error + Send + Sync + 'static, + { + match self { + Self::Custom(CustomError(b)) + | Self::CustomWithMessage(_, CustomError(b)) + if b.is::() => + { + let res = b.downcast::(); + Ok(res.unwrap()) + } + _ => Err(self), + } + } } /// A custom error diff --git a/core/src/ledger/storage_api/token.rs b/core/src/ledger/storage_api/token.rs index 02adcc32be..2281dc5706 100644 --- a/core/src/ledger/storage_api/token.rs +++ b/core/src/ledger/storage_api/token.rs @@ -171,7 +171,7 @@ where amount } None => { - storage.write(&key, token::Amount::default())?; + storage.write(&key, token::Amount::zero())?; balance } }; diff --git a/core/src/types/dec.rs b/core/src/types/dec.rs index abc80c618c..40494ffad0 100644 --- a/core/src/types/dec.rs +++ b/core/src/types/dec.rs @@ -4,7 +4,8 @@ //! precision. use std::fmt::{Debug, Display, Formatter}; -use std::ops::{Add, AddAssign, Div, Mul, Sub}; +use std::iter::Sum; +use std::ops::{Add, AddAssign, Div, Mul, Neg, Sub}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -330,6 +331,12 @@ impl AddAssign for Dec { } } +impl Sum for Dec { + fn sum>(iter: I) -> Self { + iter.fold(Dec::default(), |acc, next| acc + next) + } +} + impl Sub for Dec { type Output = Self; @@ -409,6 +416,14 @@ impl Div for Dec { } } +impl Neg for Dec { + type Output = Self; + + fn neg(self) -> Self::Output { + Self(self.0.neg()) + } +} + impl Display for Dec { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let is_neg = self.is_negative(); diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 0ee60b4326..874992ada5 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -148,6 +148,7 @@ impl Amount { } /// Checked subtraction. Returns `None` on underflow. 
+ #[must_use] pub fn checked_sub(&self, amount: Amount) -> Option { self.raw .checked_sub(amount.raw) @@ -248,6 +249,26 @@ impl Amount { pub fn from_string_precise(string: &str) -> Result { DenominatedAmount::from_str(string).map(|den| den.amount) } + + /// Multiply by a decimal [`Dec`] with the result rounded up. + /// + /// # Panics + /// Panics when the `dec` is negative. + #[must_use] + pub fn mul_ceil(&self, dec: Dec) -> Self { + assert!(!dec.is_negative()); + let tot = self.raw * dec.abs(); + let denom = Uint::from(10u64.pow(POS_DECIMAL_PRECISION as u32)); + let floor_div = tot / denom; + let rem = tot % denom; + // dbg!(tot, denom, floor_div, rem); + let raw = if !rem.is_zero() { + floor_div + Self::from(1_u64) + } else { + floor_div + }; + Self { raw } + } } /// Given a number represented as `M*B^D`, then @@ -1159,6 +1180,17 @@ mod tests { let non_zero = Amount::from_uint(1, 0).expect("Test failed"); assert!(!non_zero.is_zero()); } + + #[test] + fn test_token_amount_mul_ceil() { + let one = Amount::from(1); + let two = Amount::from(2); + let three = Amount::from(3); + let dec = Dec::from_str("0.34").unwrap(); + assert_eq!(one.mul_ceil(dec), one); + assert_eq!(two.mul_ceil(dec), one); + assert_eq!(three.mul_ceil(dec), two); + } } /// Helpers for testing with addresses. diff --git a/core/src/types/transaction/pos.rs b/core/src/types/transaction/pos.rs index fa0e3d0891..e3ea9d3a21 100644 --- a/core/src/types/transaction/pos.rs +++ b/core/src/types/transaction/pos.rs @@ -95,6 +95,30 @@ pub struct Withdraw { pub source: Option
, } +/// A redelegation of bonded tokens from one validator to another. +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Hash, + Eq, + Serialize, + Deserialize, +)] +pub struct Redelegation { + /// Source validator address + pub src_validator: Address, + /// Destination validator address + pub dest_validator: Address, + /// Owner (delegator) of the bonds to be redelegate + pub owner: Address, + /// The amount of tokens + pub amount: token::Amount, +} + /// A change to the validator commission rate. #[derive( Debug, diff --git a/core/src/types/uint.rs b/core/src/types/uint.rs index ee14e67ad1..db4e664c8b 100644 --- a/core/src/types/uint.rs +++ b/core/src/types/uint.rs @@ -11,6 +11,7 @@ use num_integer::Integer; use num_traits::CheckedMul; use uint::construct_uint; +use super::dec::{Dec, POS_DECIMAL_PRECISION}; use crate::types::token; use crate::types::token::{Amount, AmountParseError, MaspDenom}; @@ -337,6 +338,22 @@ impl I256 { Err(AmountParseError::InvalidRange) } } + + /// Multiply by a decimal [`Dec`] with the result rounded up. 
+ #[must_use] + pub fn mul_ceil(&self, dec: Dec) -> Self { + let is_res_negative = self.is_negative() ^ dec.is_negative(); + let tot = self.abs() * dec.0.abs(); + let denom = Uint::from(10u64.pow(POS_DECIMAL_PRECISION as u32)); + let floor_div = tot / denom; + let rem = tot % denom; + let abs_res = Self(if !rem.is_zero() && !is_res_negative { + floor_div + Uint::from(1_u64) + } else { + floor_div + }); + if is_res_negative { -abs_res } else { abs_res } + } } impl From for I256 { @@ -554,6 +571,8 @@ impl TryFrom for i128 { #[cfg(test)] mod test_uint { + use std::str::FromStr; + use super::*; /// Test that dividing two [`Uint`]s with the specified precision @@ -710,4 +729,20 @@ mod test_uint { let amount: Result = serde_json::from_str(r#""1000000000.2""#); assert!(amount.is_err()); } + + #[test] + fn test_i256_mul_ceil() { + let one = I256::from(1); + let two = I256::from(2); + let dec = Dec::from_str("0.25").unwrap(); + assert_eq!(one.mul_ceil(dec), one); + assert_eq!(two.mul_ceil(dec), one); + assert_eq!(I256::from(4).mul_ceil(dec), one); + assert_eq!(I256::from(5).mul_ceil(dec), two); + + assert_eq!((-one).mul_ceil(-dec), one); + + assert_eq!((-one).mul_ceil(dec), I256::zero()); + assert_eq!(one.mul_ceil(-dec), I256::zero()); + } } diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index 9c24e9edfa..5a4e014253 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -8,6 +8,7 @@ use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::eth_bridge::storage::whitelist; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; use namada_core::ledger::storage::testing::{TestStorage, TestWlStorage}; +use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::address::{self, wnam, Address}; use namada_core::types::dec::Dec; @@ -20,7 +21,8 @@ use 
namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::GenesisValidator; use namada_proof_of_stake::{ - become_validator, bond_tokens, store_total_consensus_stake, BecomeValidator, + become_validator, bond_tokens, staking_token_address, + store_total_consensus_stake, BecomeValidator, }; use crate::parameters::{ @@ -263,6 +265,8 @@ pub fn append_validators_to_storage( let mut all_keys = HashMap::new(); let params = wl_storage.pos_queries().get_pos_params(); + let staking_token = staking_token_address(wl_storage); + for (validator, stake) in consensus_validators { let keys = TestValidatorKeys::generate(); @@ -282,6 +286,8 @@ pub fn append_validators_to_storage( max_commission_rate_change: Dec::new(1, 2).unwrap(), }) .expect("Test failed"); + credit_tokens(wl_storage, &staking_token, &validator, stake) + .expect("Test failed"); bond_tokens(wl_storage, None, &validator, stake, current_epoch) .expect("Test failed"); diff --git a/proof_of_stake/Cargo.toml b/proof_of_stake/Cargo.toml index a5b407df36..5506ec5174 100644 --- a/proof_of_stake/Cargo.toml +++ b/proof_of_stake/Cargo.toml @@ -33,8 +33,12 @@ tracing.workspace = true [dev-dependencies] namada_core = {path = "../core", features = ["testing"]} +assert_matches.workspace = true itertools.workspace = true proptest.workspace = true proptest-state-machine.workspace = true test-log.workspace = true tracing-subscriber.workspace = true +pretty_assertions.workspace = true +derivative.workspace = true +yansi.workspace = true diff --git a/proof_of_stake/proptest-regressions/tests/state_machine.txt b/proof_of_stake/proptest-regressions/tests/state_machine.txt index 4c02bc0ede..341ba3ff3d 100644 --- a/proof_of_stake/proptest-regressions/tests/state_machine.txt +++ b/proof_of_stake/proptest-regressions/tests/state_machine.txt @@ -4,5 +4,3 @@ # # It is recommended to check this file in to source control so that # everyone who runs the test benefits from 
these saved cases. -cc 3076c8509d56c546d5915febcf429f218ab79a7bac34c75c288f531b88110bc3 # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 4, pipeline_len: 2, unbonding_len: 4, tm_votes_per_token: 0.0614, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001, cubic_slashing_window_length: 1 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 9185807 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6, tokens: Amount { micro: 5025206 }, consensus_key: Ed25519(PublicKey(VerificationKey("17888c2ca502371245e5e35d5bcf35246c3bc36878e859938c9ead3c54db174f"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc, tokens: Amount { micro: 4424807 }, consensus_key: Ed25519(PublicKey(VerificationKey("478243aed376da313d7cf3a60637c264cb36acc936efb341ff8d3d712092d244"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { micro: 4119410 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, tokens: Amount { micro: 3619078 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 2691447 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { micro: 224944 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, tokens: Amount { micro: 142614 }, consensus_key: Ed25519(PublicKey(VerificationKey("e2e8aa145e1ec5cb01ebfaa40e10e12f0230c832fd8135470c001cb86d77de00"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }], bonds: {BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }: {Epoch(0): 142614}, BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: {Epoch(0): 4119410}, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: {Epoch(0): 9185807}, BondId { source: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6, validator: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6 }: {Epoch(0): 5025206}, BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: {Epoch(0): 2691447}, BondId { source: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc, validator: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc }: {Epoch(0): 4424807}, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: {Epoch(0): 224944}, BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }: {Epoch(0): 3619078}}, validator_stakes: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: 
atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}}, consensus_set: {Epoch(0): {Amount { micro: 4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {Amount { micro: 4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {Amount { micro: 4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: 
atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(1): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(2): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: 
atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}}, unbonds: {}, validator_slashes: {}, enqueued_slashes: {}, validator_last_slash_epochs: {}, unbond_records: {} }, [InitValidator { address: Established: atest1v4ehgw36xgunxvj9xqmny3jyxycnzdzxxqeng33ngvunqsfsx5mnwdfjgvenvwfk89prwdpjd0cjrk, consensus_key: Ed25519(PublicKey(VerificationKey("bea04de1e5be8ca0ae27be8ad935df8d757e96c1e067e96aedeba0ded0df997d"))), commission_rate: 0.39428, max_commission_rate_change: 0.12485 }]) -cc c0ffe7b368967ea0c456da20046f7d8a78c232c066ea116d3a123c945b7882fb # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 4, pipeline_len: 2, unbonding_len: 7, tm_votes_per_token: Dec(900700.000000), block_proposer_reward: Dec(125000.000000), block_vote_reward: Dec(100000.000000), max_inflation_rate: Dec(100000.000000), target_staked_ratio: Dec(666700.000000), duplicate_vote_min_slash_rate: Dec(1000.000000), light_client_attack_min_slash_rate: Dec(1000.000000), cubic_slashing_window_length: 1 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, tokens: Amount { raw: 8937727 }, consensus_key: Ed25519(PublicKey(VerificationKey("e2e8aa145e1ec5cb01ebfaa40e10e12f0230c832fd8135470c001cb86d77de00"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { raw: 8738693 }, consensus_key: 
Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { raw: 8373784 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, tokens: Amount { raw: 3584214 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { raw: 553863 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { raw: 218044 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }], bonds: {BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }: {Epoch(0): 8.937727}, BondId { source: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: {Epoch(0): 8.373784}, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: {Epoch(0): 0.553863}, BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: {Epoch(0): 8.738693}, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: {Epoch(0): 0.218044}, BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }: {Epoch(0): 3.584214}}, validator_stakes: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}, Epoch(1): {Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}}, consensus_set: {Epoch(0): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}, Epoch(1): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 
}: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}, Epoch(2): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}}, unbonds: {}, validator_slashes: {}, enqueued_slashes: {}, validator_last_slash_epochs: {}, 
unbond_records: {} }, [Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 267 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7610143 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9863718 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 7102818 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 63132 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9663084 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2694963 } }, Bond { id: BondId { source: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7453740 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 14974324 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2628172 } }, NextEpoch, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 282055 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 11228090 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2027105 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2034080 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 3329590 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 854661 } }, Misbehavior { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, slash_type: DuplicateVote, infraction_epoch: Epoch(1), height: 0 }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 227931 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 2701887 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 1776100 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 3717491 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 5281559 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 2426117 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2005749 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7883312 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7300122 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 3388459 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 195542 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2251455 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 1237777 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 691613 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1244599 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2645543 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 8384136 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 590662 } }, NextEpoch, InitValidator { address: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz, consensus_key: 
Ed25519(PublicKey(VerificationKey("afa2335747c0249f66eca84e88fba1a0e3ccec6a8f6f97f3177a42ffbb216492"))), commission_rate: Dec(195450.000000), max_commission_rate_change: Dec(954460.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1687952 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 12754717 } }, Misbehavior { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, slash_type: LightClientAttack, infraction_epoch: Epoch(4), height: 0 }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8952712 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 519835 } }, UnjailValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, 
validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 2207493 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 236124 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 71122 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1158688 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 267618 } }, InitValidator { address: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, consensus_key: Ed25519(PublicKey(VerificationKey("822cfec1ec829a50306424ac3d11115e880b952f5f54ac9a624277898991ee70"))), commission_rate: Dec(614520.000000), max_commission_rate_change: Dec(369920.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8634884 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 8660668 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8436873 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 515615 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 46481 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 4153966 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2272563 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 7491749 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1921487 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8316111 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 11873152 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 4728535 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2828807 } }, Unbond { id: BondId { source: 
Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 655500 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 234416 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 330322 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 222600 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2538059 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 168498 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 510701 } }, Misbehavior { address: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, slash_type: DuplicateVote, infraction_epoch: Epoch(8), height: 0 }, InitValidator { address: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r, consensus_key: Ed25519(PublicKey(VerificationKey("afc853489cf37abedeb6a97d036f3dc60934194af7169a2cc15fb3f85e4e287c"))), commission_rate: Dec(52690.000000), max_commission_rate_change: Dec(56470.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7098849 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2180088 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 243441 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1621261 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 7650954 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: 
Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1201023 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 9702706 } }, InitValidator { address: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, consensus_key: Ed25519(PublicKey(VerificationKey("f8506f129faaf3bac1397ad0ab3bfa6d1a00d5c1064c4fafe740f2844be8fb04"))), commission_rate: Dec(575190.000000), max_commission_rate_change: Dec(602710.000000) }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 347187 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 5536481 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xc6nvvf4g9znxvf3xdrrgvfexuen2dek8qmnqse58q6ygdpkxeznz3j9xyeyydfht747xe, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1859243 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1907757 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 3007741 } }, Misbehavior { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, slash_type: DuplicateVote, infraction_epoch: Epoch(9), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8226972 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 602759 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8350223 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 3787232 } }, InitValidator { address: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, consensus_key: Ed25519(PublicKey(VerificationKey("0b88c50c1b9b5b1e83c89110e388908dc3cc18ce0551494ab1c82bece24b2714"))), commission_rate: Dec(674000.000000), max_commission_rate_change: Dec(247230.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 
1391049 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 4008194 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 9368360 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 9140634 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 600383 } }, Misbehavior { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, slash_type: DuplicateVote, infraction_epoch: Epoch(7), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 8599835 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 345454 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 12448069 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5151682 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 1862578 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 10904134 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 773655 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 8927299 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: 
Amount { raw: 1288039 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2861830 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 445593 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 8204875 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 602527 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 5812026 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 211165 } }, NextEpoch, Bond { id: BondId { source: Implicit: atest1d9khqw36xsun2decx9p52v2xg5cr2vphxym5vve58yerqve5x5c5yve3gepyzs3ngycy233eufckzz, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 350302 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 4560437 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 3515009 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 4956849 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xsun2decx9p52v2xg5cr2vphxym5vve58yerqve5x5c5yve3gepyzs3ngycy233eufckzz, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 290427 } }, NextEpoch, Unbond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 3261985 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 8946479 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, NextEpoch, InitValidator { address: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3, 
consensus_key: Ed25519(PublicKey(VerificationKey("a856fc650a2404e2d0c152d89c1c221bd9056a6103980e1d821b0cbae213ff44"))), commission_rate: Dec(324920.000000), max_commission_rate_change: Dec(512260.000000) }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 82795 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 128956 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 2043203 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 6764953 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 6413168 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: 
atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 6384185 } }, Misbehavior { address: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, slash_type: LightClientAttack, infraction_epoch: Epoch(13), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 8314982 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 9139532 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 34693 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 9487215 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 799953 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 3334636 } }, NextEpoch, Withdraw { 
id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 7942329 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 878389 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, UnjailValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, UnjailValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 5376602 } }, UnjailValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xc6nvvf4g9znxvf3xdrrgvfexuen2dek8qmnqse58q6ygdpkxeznz3j9xyeyydfht747xe, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1118174 } }, Unbond { id: BondId { 
source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 286221 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 73579 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 2010212 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 4276553 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 54860 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 145154 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 1941194 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 93 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 9992596 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 504024 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5640962 } }, InitValidator { address: Established: atest1v4ehgw368qmnzsfeg5urqw2p8pq5gsf4ggcnqdz9xvc5vsfjxc6nvsekgsmyv3jp8ym52wph0hm33r, consensus_key: Ed25519(PublicKey(VerificationKey("2bccbdf7490f98b2e258a399b75c74bd1b71e9f6f4cc2160edbe3186e23d30e4"))), commission_rate: Dec(427420.000000), max_commission_rate_change: Dec(574220.000000) }, Misbehavior { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, slash_type: DuplicateVote, infraction_epoch: Epoch(12), height: 0 }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 4019468 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: 
atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 5683219 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pz5zd3sgeqnxve4g9ryv3zzggerqdf3xqmrywfng4zrs3pkx5enydesg5mr2v6p4v8rst, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 6886837 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 7852494 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 749047 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 9097957 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 6781624 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 123577 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gvmrzsf58yurxsjxgfqnqv6yg56nwv69xv6yv3zpx9znv3jpg4p5zdpnxpznzv3hq7q2az, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, 
amount: Amount { raw: 1515359 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9136180 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 190090 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368pz5zd3sgeqnxve4g9ryv3zzggerqdf3xqmrywfng4zrs3pkx5enydesg5mr2v6p4v8rst, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2817512 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5207922 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36x5uyvv2px4pr2d3cgdpry3zzxq6nsd6yg5mnwsjzgcervdpegsunqd3kgy6ygvpjyvyhzj, validator: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz }, amount: Amount { raw: 70961 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gdzns33sgsmr2wz9x4rrxdenx3zyysfcxcmry32pgeznjw2zx4zrysjxgeryxsfc2etu33, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 9056961 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gvmrzsf58yurxsjxgfqnqv6yg56nwv69xv6yv3zpx9znv3jpg4p5zdpnxpznzv3hq7q2az, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 1451932 } }, Bond { id: BondId { source: Implicit: 
atest1d9khqw36gcunwdzyxpz5xs2rxuuyxvfcgfznzd3hg9zrzdfnx5crwv69ggcnvsjpgc65gd33uuymj8, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 1463719 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36x5uyvv2px4pr2d3cgdpry3zzxq6nsd6yg5mnwsjzgcervdpegsunqd3kgy6ygvpjyvyhzj, validator: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz }, amount: Amount { raw: 792907 } }, InitValidator { address: Established: atest1v4ehgw36xy65xd3cgvcyxsesgsunys3hgg6nyvekxgerz3fjxaprqvfhxser2wphg5mnjdzpf7edt5, consensus_key: Ed25519(PublicKey(VerificationKey("8f6eeade76a7ce1ccf1d3138807774696d51fcf2c8879e53aa2b082e34eec42b"))), commission_rate: Dec(592790.000000), max_commission_rate_change: Dec(854710.000000) }]) diff --git a/proof_of_stake/src/btree_set.rs b/proof_of_stake/src/btree_set.rs deleted file mode 100644 index 48460b2f0b..0000000000 --- a/proof_of_stake/src/btree_set.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! This module adds shims for BTreeSet methods that not yet stable. - -use std::collections::BTreeSet; - -/// This trait adds shims for BTreeSet methods that not yet stable. They have -/// the same behavior as their nightly counterparts, but additionally require -/// `Clone` bound on the element type (for `pop_first` and `pop_last`). -pub trait BTreeSetShims { - /// Returns a reference to the first value in the set, if any. This value is - /// always the minimum of all values in the set. - fn first_shim(&self) -> Option<&T>; - - /// Returns a reference to the last value in the set, if any. This value is - /// always the maximum of all values in the set. 
- fn last_shim(&self) -> Option<&T>; - - /// Removes the first value from the set and returns it, if any. The first - /// value is always the minimum value in the set. - fn pop_first_shim(&mut self) -> Option; - - /// Removes the last value from the set and returns it, if any. The last - /// value is always the maximum value in the set. - fn pop_last_shim(&mut self) -> Option; -} - -impl BTreeSetShims for BTreeSet { - fn first_shim(&self) -> Option<&T> { - let mut iter = self.iter(); - iter.next() - } - - fn last_shim(&self) -> Option<&T> { - let iter = self.iter(); - iter.last() - } - - fn pop_first_shim(&mut self) -> Option { - let mut iter = self.iter(); - let first = iter.next().cloned(); - if let Some(first) = first { - return self.take(&first); - } - None - } - - fn pop_last_shim(&mut self) -> Option { - let iter = self.iter(); - let last = iter.last().cloned(); - if let Some(last) = last { - return self.take(&last); - } - None - } -} diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index d5a567fc94..c06a6efd8e 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -434,7 +434,6 @@ where &self, storage: &S, epoch: Epoch, - _params: &PosParams, ) -> storage_api::Result> where S: StorageRead, @@ -482,6 +481,26 @@ where } } + /// Initialize or add a value to the current delta value at the given epoch + /// offset. + pub fn add( + &self, + storage: &mut S, + value: Data, + current_epoch: Epoch, + offset: u64, + ) -> storage_api::Result<()> + where + S: StorageWrite + StorageRead, + Data: Default, + { + self.update_data(storage, current_epoch)?; + let cur_value = self + .get_delta_val(storage, current_epoch + offset)? + .unwrap_or_default(); + self.set_at_epoch(storage, cur_value + value, current_epoch, offset) + } + /// Initialize or set the value at the given epoch offset. 
pub fn set( &self, @@ -1074,6 +1093,20 @@ mod test { assert_eq!(data_handler.get(&s, &Epoch(9))?, None); assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + epoched.add(&mut s, 15, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + assert_eq!(data_handler.get(&s, &Epoch(6))?, None); + assert_eq!(data_handler.get(&s, &Epoch(7))?, None); + assert_eq!(data_handler.get(&s, &Epoch(8))?, None); + assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(21)); + Ok(()) } diff --git a/proof_of_stake/src/error.rs b/proof_of_stake/src/error.rs new file mode 100644 index 0000000000..96123d6feb --- /dev/null +++ b/proof_of_stake/src/error.rs @@ -0,0 +1,185 @@ +/// Custom error types +use std::num::TryFromIntError; + +use namada_core::ledger::storage_api; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::storage::Epoch; +use thiserror::Error; + +use crate::rewards; +use crate::types::{BondId, ValidatorState}; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum GenesisError { + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum InflationError { + #[error("Error in calculating rewards: {0}")] + Rewards(rewards::RewardsError), + #[error("Expected validator {0} to be in consensus set but got: {1:?}")] + ExpectedValidatorInConsensus(Address, Option), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum BecomeValidatorError { + #[error("The given address {0} is already a validator")] + 
AlreadyValidator(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum BondError { + #[error("The given address {0} is not a validator address")] + NotAValidator(Address), + #[error( + "The given source address {0} is a validator address. Validators may \ + not delegate." + )] + SourceMustNotBeAValidator(Address), + #[error("The given validator address {0} is inactive")] + InactiveValidator(Address), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum UnbondError { + #[error("No bond could be found")] + NoBondFound, + #[error( + "Trying to withdraw more tokens ({0}) than the amount bonded ({0})" + )] + UnbondAmountGreaterThanBond(String, String), + #[error("No bonds found for the validator {0}")] + ValidatorHasNoBonds(Address), + #[error("Voting power not found for the validator {0}")] + ValidatorHasNoVotingPower(Address), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), + #[error("Trying to unbond from a frozen validator: {0}")] + ValidatorIsFrozen(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum WithdrawError { + #[error("No unbond could be found for {0}")] + NoUnbondFound(BondId), + #[error("No unbond may be withdrawn yet for {0}")] + NoWithdrawableUnbond(BondId), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum SlashError { + #[error("The validator {0} has no total deltas value")] + ValidatorHasNoTotalDeltas(Address), + #[error("The validator {0} has no voting power")] + ValidatorHasNoVotingPower(Address), + #[error("Unexpected slash token change")] + InvalidSlashChange(i128), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), + #[error("Unexpected negative stake {0} for validator {1}")] + NegativeStake(i128, Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum CommissionRateChangeError { + #[error("Unexpected negative commission 
rate {0} for validator {1}")] + NegativeRate(Dec, Address), + #[error("Rate change of {0} is too large for validator {1}")] + RateChangeTooLarge(Dec, Address), + #[error( + "There is no maximum rate change written in storage for validator {0}" + )] + NoMaxSetInStorage(Address), + #[error("Cannot write to storage for validator {0}")] + CannotWrite(Address), + #[error("Cannot read storage for validator {0}")] + CannotRead(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum UnjailValidatorError { + #[error("The given address {0} is not a validator address")] + NotAValidator(Address), + #[error("The given address {0} is not jailed in epoch {1}")] + NotJailed(Address, Epoch), + #[error( + "The given address {0} is not eligible for unnjailing until epoch \ + {1}: current epoch is {2}" + )] + NotEligible(Address, Epoch, Epoch), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum RedelegationError { + #[error("The redelegation is chained")] + IsChainedRedelegation, + #[error("The source and destination validator must be different")] + RedelegationSrcEqDest, + #[error("The delegator must not be a validator")] + DelegatorIsValidator, + #[error("The address {0} must be a validator")] + NotAValidator(Address), +} + +impl From for storage_api::Error { + fn from(err: BecomeValidatorError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: BondError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: UnbondError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: WithdrawError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: CommissionRateChangeError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: InflationError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: UnjailValidatorError) -> Self { + Self::new(err) + } +} + +impl 
From for storage_api::Error { + fn from(err: RedelegationError) -> Self { + Self::new(err) + } +} diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 0fbbf2231b..0fadd6c728 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -12,7 +12,6 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -pub mod btree_set; pub mod epoched; pub mod parameters; pub mod pos_queries; @@ -21,22 +20,22 @@ pub mod storage; pub mod types; // pub mod validation; +mod error; #[cfg(test)] mod tests; use core::fmt::Debug; use std::cmp::{self, Reverse}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::num::TryFromIntError; use borsh::BorshDeserialize; +pub use error::*; use namada_core::ledger::storage_api::collections::lazy_map::{ - NestedSubKey, SubKey, + Collectable, LazyMap, NestedMap, NestedSubKey, SubKey, }; use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; -use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{ - self, ResultExt, StorageRead, StorageWrite, + self, token, ResultExt, StorageRead, StorageWrite, }; use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::dec::Dec; @@ -44,31 +43,33 @@ use namada_core::types::key::{ common, tm_consensus_key_raw_hash, PublicKeyTmRawHash, }; pub use namada_core::types::storage::{Epoch, Key, KeySeg}; -use namada_core::types::token; use once_cell::unsync::Lazy; -use parameters::PosParams; +pub use parameters::PosParams; use rewards::PosRewardsCalculator; use storage::{ bonds_for_source_prefix, bonds_prefix, consensus_keys_key, - get_validator_address_from_bond, into_tm_voting_power, is_bond_key, - is_unbond_key, is_validator_slashes_key, last_block_proposer_key, - params_key, slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, + get_validator_address_from_bond, is_bond_key, is_unbond_key, + is_validator_slashes_key, last_block_proposer_key, 
params_key, + slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, validator_address_raw_hash_key, validator_last_slash_key, - validator_max_commission_rate_change_key, BondDetails, - BondsAndUnbondsDetail, BondsAndUnbondsDetails, EpochedSlashes, - ReverseOrdTokenAmount, RewardsAccumulator, SlashedAmount, - TotalConsensusStakes, UnbondDetails, ValidatorAddresses, - ValidatorUnbondRecords, + validator_max_commission_rate_change_key, }; -use thiserror::Error; use types::{ - BelowCapacityValidatorSet, BelowCapacityValidatorSets, BondId, Bonds, - CommissionRates, ConsensusValidator, ConsensusValidatorSet, - ConsensusValidatorSets, GenesisValidator, Position, RewardsProducts, Slash, - SlashType, Slashes, TotalDeltas, Unbonds, ValidatorConsensusKeys, - ValidatorDeltas, ValidatorEthColdKeys, ValidatorEthHotKeys, - ValidatorPositionAddresses, ValidatorSetPositions, ValidatorSetUpdate, - ValidatorState, ValidatorStates, VoteInfo, WeightedValidator, + into_tm_voting_power, BelowCapacityValidatorSet, + BelowCapacityValidatorSets, BondDetails, BondId, Bonds, + BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionRates, + ConsensusValidator, ConsensusValidatorSet, ConsensusValidatorSets, + DelegatorRedelegatedBonded, DelegatorRedelegatedUnbonded, + EagerRedelegatedBondsMap, EpochedSlashes, GenesisValidator, + IncomingRedelegations, OutgoingRedelegations, Position, + RedelegatedBondsOrUnbonds, RedelegatedTokens, ReverseOrdTokenAmount, + RewardsAccumulator, RewardsProducts, Slash, SlashType, SlashedAmount, + Slashes, TotalConsensusStakes, TotalDeltas, TotalRedelegatedBonded, + TotalRedelegatedUnbonded, UnbondDetails, Unbonds, ValidatorAddresses, + ValidatorConsensusKeys, ValidatorDeltas, ValidatorEthColdKeys, + ValidatorEthHotKeys, ValidatorPositionAddresses, ValidatorSetPositions, + ValidatorSetUpdate, ValidatorState, ValidatorStates, + ValidatorTotalUnbonded, VoteInfo, WeightedValidator, }; /// Address of the PoS account implemented as a native VP @@ -89,160 +90,7 @@ 
pub fn staking_token_address(storage: &impl StorageRead) -> Address { /// stored const STORE_VALIDATOR_SETS_LEN: u64 = 2; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum GenesisError { - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum InflationError { - #[error("Error in calculating rewards: {0}")] - Rewards(rewards::RewardsError), - #[error("Expected validator {0} to be in consensus set but got: {1:?}")] - ExpectedValidatorInConsensus(Address, Option), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum BecomeValidatorError { - #[error("The given address {0} is already a validator")] - AlreadyValidator(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum BondError { - #[error("The given address {0} is not a validator address")] - NotAValidator(Address), - #[error( - "The given source address {0} is a validator address. Validators may \ - not delegate." 
- )] - SourceMustNotBeAValidator(Address), - #[error("The given validator address {0} is inactive")] - InactiveValidator(Address), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum UnbondError { - #[error("No bond could be found")] - NoBondFound, - #[error( - "Trying to withdraw more tokens ({0}) than the amount bonded ({0})" - )] - UnbondAmountGreaterThanBond(String, String), - #[error("No bonds found for the validator {0}")] - ValidatorHasNoBonds(Address), - #[error("Voting power not found for the validator {0}")] - ValidatorHasNoVotingPower(Address), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), - #[error("Trying to unbond from a frozen validator: {0}")] - ValidatorIsFrozen(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum WithdrawError { - #[error("No unbond could be found for {0}")] - NoUnbondFound(BondId), - #[error("No unbond may be withdrawn yet for {0}")] - NoWithdrawableUnbond(BondId), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum SlashError { - #[error("The validator {0} has no total deltas value")] - ValidatorHasNoTotalDeltas(Address), - #[error("The validator {0} has no voting power")] - ValidatorHasNoVotingPower(Address), - #[error("Unexpected slash token change")] - InvalidSlashChange(i128), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), - #[error("Unexpected negative stake {0} for validator {1}")] - NegativeStake(i128, Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum CommissionRateChangeError { - #[error("Unexpected negative commission rate {0} for validator {1}")] - NegativeRate(Dec, Address), - #[error("Rate change of {0} is too large for validator {1}")] - RateChangeTooLarge(Dec, Address), - #[error( - "There is no maximum rate change written in storage for validator {0}" - )] - NoMaxSetInStorage(Address), - 
#[error("Cannot write to storage for validator {0}")] - CannotWrite(Address), - #[error("Cannot read storage for validator {0}")] - CannotRead(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum UnjailValidatorError { - #[error("The given address {0} is not a validator address")] - NotAValidator(Address), - #[error("The given address {0} is not jailed in epoch {1}")] - NotJailed(Address, Epoch), - #[error( - "The given address {0} is not eligible for unnjailing until epoch \ - {1}: current epoch is {2}" - )] - NotEligible(Address, Epoch, Epoch), -} - -impl From for storage_api::Error { - fn from(err: BecomeValidatorError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: BondError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: UnbondError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: WithdrawError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: CommissionRateChangeError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: InflationError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: UnjailValidatorError) -> Self { - Self::new(err) - } -} +// ---- Storage handles ---- /// Get the storage handle to the epoched consensus validator set pub fn consensus_validator_set_handle() -> ConsensusValidatorSets { @@ -319,7 +167,8 @@ pub fn validator_commission_rate_handle( CommissionRates::open(key) } -/// Get the storage handle to a bond +/// Get the storage handle to a bond, which is dynamically updated with when +/// unbonding pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { let bond_id = BondId { source: source.clone(), @@ -329,7 +178,8 @@ pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { Bonds::open(key) } -/// Get the storage handle to a validator's total bonds +/// Get the storage 
handle to a validator's total bonds, which are not updated +/// due to unbonding pub fn total_bonded_handle(validator: &Address) -> Bonds { let key = storage::validator_total_bonded_key(validator); Bonds::open(key) @@ -346,9 +196,9 @@ pub fn unbond_handle(source: &Address, validator: &Address) -> Unbonds { } /// Get the storage handle to a validator's total-unbonded map -pub fn unbond_records_handle(validator: &Address) -> ValidatorUnbondRecords { +pub fn total_unbonded_handle(validator: &Address) -> ValidatorTotalUnbonded { let key = storage::validator_total_unbonded_key(validator); - ValidatorUnbondRecords::open(key) + ValidatorTotalUnbonded::open(key) } /// Get the storage handle to a PoS validator's deltas @@ -394,6 +244,54 @@ pub fn delegator_rewards_products_handle( RewardsProducts::open(key) } +/// Get the storage handle to a validator's incoming redelegations +pub fn validator_incoming_redelegations_handle( + validator: &Address, +) -> IncomingRedelegations { + let key = storage::validator_incoming_redelegations_key(validator); + IncomingRedelegations::open(key) +} + +/// Get the storage handle to a validator's outgoing redelegations +pub fn validator_outgoing_redelegations_handle( + validator: &Address, +) -> OutgoingRedelegations { + let key: Key = storage::validator_outgoing_redelegations_key(validator); + OutgoingRedelegations::open(key) +} + +/// Get the storage handle to a validator's total redelegated bonds +pub fn validator_total_redelegated_bonded_handle( + validator: &Address, +) -> TotalRedelegatedBonded { + let key: Key = storage::validator_total_redelegated_bonded_key(validator); + TotalRedelegatedBonded::open(key) +} + +/// Get the storage handle to a validator's outgoing redelegations +pub fn validator_total_redelegated_unbonded_handle( + validator: &Address, +) -> TotalRedelegatedUnbonded { + let key: Key = storage::validator_total_redelegated_unbonded_key(validator); + TotalRedelegatedUnbonded::open(key) +} + +/// Get the storage handle to 
a delegator's redelegated bonds information +pub fn delegator_redelegated_bonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedBonded { + let key: Key = storage::delegator_redelegated_bonds_key(delegator); + DelegatorRedelegatedBonded::open(key) +} + +/// Get the storage handle to a delegator's redelegated unbonds information +pub fn delegator_redelegated_unbonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedUnbonded { + let key: Key = storage::delegator_redelegated_unbonds_key(delegator); + DelegatorRedelegatedUnbonded::open(key) +} + /// Init genesis pub fn init_genesis( storage: &mut S, @@ -407,7 +305,7 @@ where tracing::debug!("Initializing PoS genesis"); write_pos_params(storage, params.clone())?; - let mut total_bonded = token::Amount::default(); + let mut total_bonded = token::Amount::zero(); consensus_validator_set_handle().init(storage, current_epoch)?; below_capacity_validator_set_handle().init(storage, current_epoch)?; validator_set_positions_handle().init(storage, current_epoch)?; @@ -466,20 +364,19 @@ where eth_cold_key, current_epoch, )?; - let delta = token::Change::from(tokens); validator_deltas_handle(&address).init_at_genesis( storage, - delta, + tokens.change(), current_epoch, )?; bond_handle(&address, &address).init_at_genesis( storage, - delta, + tokens, current_epoch, )?; total_bonded_handle(&address).init_at_genesis( storage, - delta, + tokens, current_epoch, )?; validator_commission_rate_handle(&address).init_at_genesis( @@ -501,7 +398,7 @@ where // Credit bonded token amount to the PoS account let staking_token = staking_token_address(storage); - credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; + token::credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; // Copy the genesis validator set into the pipeline epoch as well for epoch in (current_epoch.next()).iter_range(params.pipeline_len) { copy_validator_sets_and_positions(storage, current_epoch, epoch)?; @@ -634,42 +531,44 @@ where } /// Read 
PoS validator's delta value. -pub fn read_validator_delta_value( +pub fn read_validator_deltas_value( storage: &S, - params: &PosParams, validator: &Address, - epoch: namada_core::types::storage::Epoch, + epoch: &namada_core::types::storage::Epoch, ) -> storage_api::Result> where S: StorageRead, { let handle = validator_deltas_handle(validator); - handle.get_delta_val(storage, epoch, params) + handle.get_delta_val(storage, *epoch) } /// Read PoS validator's stake (sum of deltas). -/// Returns `None` when the given address is not a validator address. For a -/// validator with `0` stake, this returns `Ok(token::Amount::default())`. +/// For non-validators and validators with `0` stake, this returns the default - +/// `token::Amount::zero()`. pub fn read_validator_stake( storage: &S, params: &PosParams, validator: &Address, epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> +) -> storage_api::Result where S: StorageRead, { let handle = validator_deltas_handle(validator); let amount = handle .get_sum(storage, epoch, params)? - .map(token::Amount::from_change); + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) + .unwrap_or_default(); Ok(amount) } /// Add or remove PoS validator's stake delta value pub fn update_validator_deltas( storage: &mut S, - params: &PosParams, validator: &Address, delta: token::Change, current_epoch: namada_core::types::storage::Epoch, @@ -680,7 +579,7 @@ where { let handle = validator_deltas_handle(validator); let val = handle - .get_delta_val(storage, current_epoch + offset, params)? + .get_delta_val(storage, current_epoch + offset)? .unwrap_or_default(); handle.set(storage, val + delta, current_epoch, offset) } @@ -697,7 +596,10 @@ where let handle = total_deltas_handle(); let amnt = handle .get_sum(storage, epoch, params)? 
- .map(token::Amount::from_change) + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) .unwrap_or_default(); Ok(amnt) } @@ -848,7 +750,6 @@ where /// Note: for EpochedDelta, write the value to change storage by pub fn update_total_deltas( storage: &mut S, - params: &PosParams, delta: token::Change, current_epoch: namada_core::types::storage::Epoch, offset: u64, @@ -858,7 +759,7 @@ where { let handle = total_deltas_handle(); let val = handle - .get_delta_val(storage, current_epoch + offset, params)? + .get_delta_val(storage, current_epoch + offset)? .unwrap_or_default(); handle.set(storage, val + delta, current_epoch, offset) } @@ -920,13 +821,18 @@ pub fn bond_tokens( where S: StorageRead + StorageWrite, { - let amount = amount.change(); tracing::debug!( "Bonding token amount {} at epoch {current_epoch}", amount.to_string_native() ); + if amount.is_zero() { + return Ok(()); + } + let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; + + // Check that the source is not a validator if let Some(source) = source { if source != validator && is_validator(storage, source)? { return Err( @@ -934,6 +840,8 @@ where ); } } + + // Check that the validator is actually a validator let validator_state_handle = validator_state_handle(validator); let state = validator_state_handle.get(storage, pipeline_epoch, ¶ms)?; if state.is_none() { @@ -942,6 +850,7 @@ where let source = source.unwrap_or(validator); tracing::debug!("Source {} --> Validator {}", source, validator); + let bond_handle = bond_handle(source, validator); let total_bonded_handle = total_bonded_handle(validator); @@ -955,52 +864,27 @@ where } } - tracing::debug!("\nBonds before incrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bond_handle - .get_delta_val(storage, ep, ¶ms)? 
- .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before incrementing: {bonds:#?}"); } // Initialize or update the bond at the pipeline offset - let offset = params.pipeline_len; - let cur_remain = bond_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default(); - bond_handle.set(storage, cur_remain + amount, current_epoch, offset)?; - let cur_remain_global = total_bonded_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default(); - total_bonded_handle.set( + bond_handle.add(storage, amount, current_epoch, params.pipeline_len)?; + total_bonded_handle.add( storage, - cur_remain_global + amount, + amount, current_epoch, - offset, + params.pipeline_len, )?; - tracing::debug!("\nBonds after incrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bond_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after incrementing: {bonds:#?}"); } // Update the validator set - // We allow bonding if the validator is jailed, however if jailed, there + // Allow bonding even if the validator is jailed. However, if jailed, there // must be no changes to the validator set. Check at the pipeline epoch. 
let is_jailed_at_pipeline = matches!( validator_state_handle @@ -1013,32 +897,30 @@ where storage, ¶ms, validator, - amount, - current_epoch, + amount.change(), + pipeline_epoch, )?; } // Update the validator and total deltas update_validator_deltas( storage, - ¶ms, validator, - amount, + amount.change(), current_epoch, - offset, + params.pipeline_len, )?; - update_total_deltas(storage, ¶ms, amount, current_epoch, offset)?; + update_total_deltas( + storage, + amount.change(), + current_epoch, + params.pipeline_len, + )?; // Transfer the bonded tokens from the source to PoS let staking_token = staking_token_address(storage); - transfer_tokens( - storage, - &staking_token, - token::Amount::from_change(amount), - source, - &ADDRESS, - )?; + token::transfer(storage, &staking_token, source, &ADDRESS, amount)?; Ok(()) } @@ -1155,7 +1037,7 @@ fn update_validator_set( params: &PosParams, validator: &Address, token_change: token::Change, - current_epoch: Epoch, + epoch: Epoch, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, @@ -1163,26 +1045,23 @@ where if token_change.is_zero() { return Ok(()); } - let pipeline_epoch = current_epoch + params.pipeline_len; + // let pipeline_epoch = current_epoch + params.pipeline_len; tracing::debug!( - "Update epoch for validator set: {pipeline_epoch}, validator: \ - {validator}" + "Update epoch for validator set: {epoch}, validator: {validator}" ); let consensus_validator_set = consensus_validator_set_handle(); let below_capacity_validator_set = below_capacity_validator_set_handle(); // Validator sets at the pipeline offset - let consensus_val_handle = consensus_validator_set.at(&pipeline_epoch); - let below_capacity_val_handle = - below_capacity_validator_set.at(&pipeline_epoch); + let consensus_val_handle = consensus_validator_set.at(&epoch); + let below_capacity_val_handle = below_capacity_validator_set.at(&epoch); - let tokens_pre = - read_validator_stake(storage, params, validator, pipeline_epoch)? 
- .unwrap_or_default(); + let tokens_pre = read_validator_stake(storage, params, validator, epoch)?; // tracing::debug!("VALIDATOR STAKE BEFORE UPDATE: {}", tokens_pre); let tokens_post = tokens_pre.change() + token_change; + debug_assert!(tokens_post.non_negative()); let tokens_post = token::Amount::from_change(tokens_post); // If token amounts both before and after the action are below the threshold @@ -1195,12 +1074,8 @@ where // The position is only set when the validator is in consensus or // below_capacity set (not in below_threshold set) - let position = read_validator_set_position( - storage, - validator, - pipeline_epoch, - params, - )?; + let position = + read_validator_set_position(storage, validator, epoch, params)?; if let Some(position) = position { let consensus_vals_pre = consensus_val_handle.at(&tokens_pre); @@ -1234,13 +1109,13 @@ where validator_state_handle(validator).set( storage, ValidatorState::BelowThreshold, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Remove the validator's position from storage validator_set_positions_handle() - .at(&pipeline_epoch) + .at(&epoch) .remove(storage, validator)?; // Promote the next below-cap validator if there is one @@ -1265,14 +1140,14 @@ where insert_validator_into_set( &consensus_val_handle.at(&max_bc_amount), storage, - &pipeline_epoch, + &epoch, &removed_max_below_capacity, )?; validator_state_handle(&removed_max_below_capacity).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } } else if tokens_post < max_below_capacity_validator_amount { @@ -1300,28 +1175,28 @@ where &consensus_val_handle .at(&max_below_capacity_validator_amount), storage, - &pipeline_epoch, + &epoch, &removed_max_below_capacity, )?; validator_state_handle(&removed_max_below_capacity).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Insert the current validator into the below-capacity set insert_validator_into_set( 
&below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } else { tracing::debug!("Validator remains in consensus set"); @@ -1330,7 +1205,7 @@ where insert_validator_into_set( &consensus_val_handle.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; } @@ -1361,11 +1236,10 @@ where insert_into_consensus_and_demote_to_below_cap( storage, - params, validator, tokens_post, min_consensus_validator_amount, - current_epoch, + epoch, &consensus_val_handle, &below_capacity_val_handle, )?; @@ -1375,14 +1249,14 @@ where insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } else { // The current validator is demoted to the below-threshold set @@ -1393,13 +1267,13 @@ where validator_state_handle(validator).set( storage, ValidatorState::BelowThreshold, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Remove the validator's position from storage validator_set_positions_handle() - .at(&pipeline_epoch) + .at(&epoch) .remove(storage, validator)?; } } @@ -1411,7 +1285,7 @@ where // Move the validator into the appropriate set let num_consensus_validators = - get_num_consensus_validators(storage, pipeline_epoch)?; + get_num_consensus_validators(storage, epoch)?; if num_consensus_validators < params.max_validator_slots { // Just insert into the consensus set tracing::debug!("Inserting validator into the consensus set"); @@ -1419,14 +1293,14 @@ where insert_validator_into_set( &consensus_val_handle.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::Consensus, - current_epoch, - 
params.pipeline_len, + epoch, + 0, )?; } else { let min_consensus_validator_amount = @@ -1444,11 +1318,10 @@ where insert_into_consensus_and_demote_to_below_cap( storage, - params, validator, tokens_post, min_consensus_validator_amount, - current_epoch, + epoch, &consensus_val_handle, &below_capacity_val_handle, )?; @@ -1461,14 +1334,14 @@ where insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } } @@ -1480,11 +1353,10 @@ where #[allow(clippy::too_many_arguments)] fn insert_into_consensus_and_demote_to_below_cap( storage: &mut S, - params: &PosParams, validator: &Address, tokens_post: token::Amount, min_consensus_amount: token::Amount, - current_epoch: Epoch, + epoch: Epoch, consensus_set: &ConsensusValidatorSet, below_capacity_set: &BelowCapacityValidatorSet, ) -> storage_api::Result<()> @@ -1500,35 +1372,35 @@ where .remove(storage, &last_position_of_min_consensus_vals)? 
.expect("There must be always be at least 1 consensus validator"); - let pipeline_epoch = current_epoch + params.pipeline_len; + // let pipeline_epoch = current_epoch + params.pipeline_len; // Insert the min consensus validator into the below-capacity // set insert_validator_into_set( &below_capacity_set.at(&min_consensus_amount.into()), storage, - &pipeline_epoch, + &epoch, &removed_min_consensus, )?; validator_state_handle(&removed_min_consensus).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Insert the current validator into the consensus set insert_validator_into_set( &consensus_set.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; Ok(()) } @@ -1583,8 +1455,6 @@ where below_cap_in_mem.insert((stake, position), address); } - tracing::debug!("{consensus_in_mem:?}"); - for ((val_stake, val_position), val_address) in consensus_in_mem.into_iter() { consensus_validator_set @@ -1592,11 +1462,6 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } - tracing::debug!("New validator set should be inserted:"); - tracing::debug!( - "{:?}", - read_consensus_validator_set_addresses(storage, target_epoch)? 
- ); for ((val_stake, val_position), val_address) in below_cap_in_mem.into_iter() { @@ -1842,23 +1707,42 @@ struct BondAndUnbondUpdates { unbond_value: token::Change, } +/// Temp: In quint this is from `ResultUnbondTx` field `resultSlashing: {sum: +/// int, epochMap: Epoch -> int}` +#[derive(Debug, Default)] +pub struct ResultSlashing { + /// The token amount unbonded from the validator stake after accounting for + /// slashes + pub sum: token::Amount, + /// Map from bond start epoch to token amount after slashing + pub epoch_map: BTreeMap, +} + /// Unbond tokens that are bonded between a validator and a source (self or -/// delegator) +/// delegator). +/// +/// This fn is also called during redelegation for a source validator, in +/// which case the `is_redelegation` param must be true. pub fn unbond_tokens( storage: &mut S, source: Option<&Address>, validator: &Address, amount: token::Amount, current_epoch: Epoch, -) -> storage_api::Result<()> + is_redelegation: bool, +) -> storage_api::Result where S: StorageRead + StorageWrite, { - let amount = amount.change(); tracing::debug!( - "Unbonding token amount {} at epoch {current_epoch}", - amount.to_string_native() + "Unbonding token amount {} at epoch {}", + amount.to_string_native(), + current_epoch ); + if amount.is_zero() { + return Ok(ResultSlashing::default()); + } + let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; @@ -1879,146 +1763,252 @@ where return Err(UnbondError::ValidatorIsFrozen(validator.clone()).into()); } - // Should be able to unbond inactive validators - - // Check that validator is not inactive at anywhere between the current - // epoch and pipeline offset - // let validator_state_handle = validator_state_handle(validator); - // for epoch in current_epoch.iter_range(params.pipeline_len) { - // if let Some(ValidatorState::Inactive) = - // validator_state_handle.get(storage, epoch, ¶ms)? 
- // { - // return - // Err(BondError::InactiveValidator(validator.clone()).into()); } - // } + // TODO: check that validator is not inactive (when implemented)! let source = source.unwrap_or(validator); let bonds_handle = bond_handle(source, validator); - tracing::debug!("\nBonds before decrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bonds_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } - } - // Make sure there are enough tokens left in the bond at the pipeline offset let remaining_at_pipeline = bonds_handle .get_sum(storage, pipeline_epoch, ¶ms)? .unwrap_or_default(); if amount > remaining_at_pipeline { return Err(UnbondError::UnbondAmountGreaterThanBond( - token::Amount::from_change(amount).to_string_native(), - token::Amount::from_change(remaining_at_pipeline) - .to_string_native(), + amount.to_string_native(), + remaining_at_pipeline.to_string_native(), ) .into()); } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before decrementing: {bonds:#?}"); + } + let unbonds = unbond_handle(source, validator); - // TODO: think if this should be +1 or not!!! let withdrawable_epoch = current_epoch + params.withdrawable_epoch_offset(); - let mut remaining = amount; - let mut amount_after_slashing = token::Change::default(); + let redelegated_bonds = + delegator_redelegated_bonds_handle(source).at(validator); - // Iterate thru bonds, find non-zero delta entries starting from - // future-most, then decrement those values. For every val that - // gets decremented down to 0, need a unique unbond object. 
- // Read all matched bonds into memory to do reverse iteration - #[allow(clippy::needless_collect)] - let bonds: Vec> = - bonds_handle.get_data_handler().iter(storage)?.collect(); + #[cfg(debug_assertions)] + let redel_bonds_pre = redelegated_bonds.collect_map(storage)?; - let mut bond_iter = bonds.into_iter().rev(); - let mut new_bond_values = HashSet::::new(); + // `resultUnbonding` + // Find the bonds to fully unbond (remove) and one to partially unbond, if + // necessary + let bonds_to_unbond = find_bonds_to_remove( + storage, + &bonds_handle.get_data_handler(), + amount, + )?; - while remaining > token::Change::default() { - let bond = bond_iter.next().transpose()?; - if bond.is_none() { - continue; + // `modifiedRedelegation` + // A bond may have both redelegated and non-redelegated tokens in it. If + // this is the case, compute the modified state of the redelegation. + let modified_redelegation = match bonds_to_unbond.new_entry { + Some((bond_epoch, new_bond_amount)) => { + if redelegated_bonds.contains(storage, &bond_epoch)? { + let cur_bond_amount = bonds_handle + .get_delta_val(storage, bond_epoch)? + .unwrap_or_default(); + compute_modified_redelegation( + storage, + &redelegated_bonds.at(&bond_epoch), + bond_epoch, + cur_bond_amount - new_bond_amount, + )? + } else { + ModifiedRedelegation::default() + } } - let (bond_epoch, bond_amount) = bond.unwrap(); - // println!("\nBond (epoch, amnt) = ({}, {})", bond_epoch, bond_amount); - // println!("remaining = {}", remaining); + None => ModifiedRedelegation::default(), + }; - let to_unbond = cmp::min(bond_amount, remaining); - new_bond_values.insert(BondAndUnbondUpdates { - bond_start: bond_epoch, - new_bond_value: bond_amount - to_unbond, - unbond_value: to_unbond, - }); - // println!("to_unbond (init) = {}", to_unbond); + // Compute the new unbonds eagerly + // `keysUnbonds` + // Get a set of epochs from which we're unbonding (fully and partially). 
+ let bond_epochs_to_unbond = + if let Some((start_epoch, _)) = bonds_to_unbond.new_entry { + let mut to_remove = bonds_to_unbond.epochs.clone(); + to_remove.insert(start_epoch); + to_remove + } else { + bonds_to_unbond.epochs.clone() + }; - let slashes_for_this_bond = - find_slashes_in_range(storage, bond_epoch, None, validator)?; + // `newUnbonds` + // For each epoch we're unbonding, find the amount that's being unbonded. + // For full unbonds, this is the current bond value. For partial unbonds + // it is a difference between the current and new bond amount. + let new_unbonds_map = bond_epochs_to_unbond + .into_iter() + .map(|epoch| { + let cur_bond_value = bonds_handle + .get_delta_val(storage, epoch) + .unwrap() + .unwrap_or_default(); + let value = if let Some((start_epoch, new_bond_amount)) = + bonds_to_unbond.new_entry + { + if start_epoch == epoch { + cur_bond_value - new_bond_amount + } else { + cur_bond_value + } + } else { + cur_bond_value + }; + (epoch, value) + }) + .collect::>(); - amount_after_slashing += get_slashed_amount( - ¶ms, - token::Amount::from_change(to_unbond), - &slashes_for_this_bond, - )?; - // println!("Cur amnt after slashing = {}", &amount_after_slashing); + // `updatedBonded` + // Remove bonds for all the full unbonds. + for epoch in &bonds_to_unbond.epochs { + bonds_handle.get_data_handler().remove(storage, epoch)?; + } + // Replace bond amount for partial unbond, if any. + if let Some((bond_epoch, new_bond_amount)) = bonds_to_unbond.new_entry { + bonds_handle.set(storage, new_bond_amount, bond_epoch, 0)?; + } - // Update the unbond records - let cur_amnt = unbond_records_handle(validator) - .at(&pipeline_epoch) - .get(storage, &bond_epoch)? 
- .unwrap_or_default(); - unbond_records_handle(validator) - .at(&pipeline_epoch) - .insert( + // `updatedUnbonded` + // Update the unbonds in storage using the eager map computed above + if !is_redelegation { + for (start_epoch, &unbond_amount) in new_unbonds_map.iter() { + unbonds.at(start_epoch).update( storage, - bond_epoch, - cur_amnt + token::Amount::from_change(to_unbond), + withdrawable_epoch, + |cur_val| cur_val.unwrap_or_default() + unbond_amount, )?; + } + } - remaining -= to_unbond; + // `newRedelegatedUnbonds` + // This is what the delegator's redelegated unbonds would look like if this + // was the only unbond in the PoS system. We need to add these redelegated + // unbonds to the existing redelegated unbonds + let new_redelegated_unbonds = compute_new_redelegated_unbonds( + storage, + &redelegated_bonds, + &bonds_to_unbond.epochs, + &modified_redelegation, + )?; + + // `updatedRedelegatedBonded` + // NOTE: for now put this here after redelegated unbonds calc bc that one + // uses the pre-modified redelegated bonds from storage! + // First remove redelegation entries in epochs with full unbonds. 
+ for epoch_to_remove in &bonds_to_unbond.epochs { + redelegated_bonds.remove_all(storage, epoch_to_remove)?; + } + if let Some(epoch) = modified_redelegation.epoch { + tracing::debug!("\nIs modified redelegation"); + if modified_redelegation.validators_to_remove.is_empty() { + redelegated_bonds.remove_all(storage, &epoch)?; + } else { + // Then update the redelegated bonds at this epoch + let rbonds = redelegated_bonds.at(&epoch); + update_redelegated_bonds(storage, &rbonds, &modified_redelegation)?; + } } - drop(bond_iter); - // Write the in-memory bond and unbond values back to storage - for BondAndUnbondUpdates { - bond_start, - new_bond_value, - unbond_value, - } in new_bond_values.into_iter() - { - bonds_handle.set(storage, new_bond_value, bond_start, 0)?; - update_unbond( - &unbonds, - storage, - &withdrawable_epoch, - &bond_start, - token::Amount::from_change(unbond_value), - )?; + if !is_redelegation { + // `val updatedRedelegatedUnbonded` with updates applied below + // Delegator's redelegated unbonds to this validator. 
+ let delegator_redelegated_unbonded = + delegator_redelegated_unbonds_handle(source).at(validator); + + // Quint `def updateRedelegatedUnbonded` with `val + // updatedRedelegatedUnbonded` together with last statement + // in `updatedDelegator.with("redelegatedUnbonded", ...` updated + // directly in storage + for (start, unbonds) in &new_redelegated_unbonds { + let this_redelegated_unbonded = delegator_redelegated_unbonded + .at(start) + .at(&withdrawable_epoch); + + // Update the delegator's redelegated unbonds with the change + for (src_validator, redelegated_unbonds) in unbonds { + let redelegated_unbonded = + this_redelegated_unbonded.at(src_validator); + for (&redelegation_epoch, &change) in redelegated_unbonds { + redelegated_unbonded.update( + storage, + redelegation_epoch, + |current| current.unwrap_or_default() + change, + )?; + } + } + } + } + // all `val updatedDelegator` changes are applied at this point + + // `val updatedTotalBonded` and `val updatedTotalUnbonded` with updates + // Update the validator's total bonded and unbonded amounts + let total_bonded = total_bonded_handle(validator).get_data_handler(); + let total_unbonded = total_unbonded_handle(validator).at(&pipeline_epoch); + for (&start_epoch, &amount) in &new_unbonds_map { + total_bonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() - amount + })?; + total_unbonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() + amount + })?; } - tracing::debug!("Bonds after decrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bonds_handle - .get_delta_val(storage, ep, ¶ms)? 
- .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + for (redelegation_start_epoch, unbonds) in &new_redelegated_unbonds { + for (src_validator, changes) in unbonds { + for (bond_start_epoch, change) in changes { + // total redelegated bonded + let bonded_sub_map = total_redelegated_bonded + .at(redelegation_start_epoch) + .at(src_validator); + bonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() - *change, + )?; + + // total redelegated unbonded + let unbonded_sub_map = total_redelegated_unbonded + .at(&pipeline_epoch) + .at(redelegation_start_epoch) + .at(src_validator); + unbonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() + *change, + )?; + } } } - tracing::debug!( - "Token change including slashes on unbond = {}", - (-amount_after_slashing).to_string_native() + + let slashes = find_validator_slashes(storage, validator)?; + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_unbond( + storage, + ¶ms, + &new_unbonds_map, + &new_redelegated_unbonds, + slashes, + )?; + #[cfg(debug_assertions)] + let redel_bonds_post = redelegated_bonds.collect_map(storage)?; + debug_assert!( + result_slashing.sum <= amount, + "Amount after slashing ({}) must be <= requested amount to unbond \ + ({}).", + result_slashing.sum.to_string_native(), + amount.to_string_native(), ); + let change_after_slashing = -result_slashing.sum.change(); // Update the validator set at the pipeline offset. 
Since unbonding from a // jailed validator who is no longer frozen is allowed, only update the // validator set if the validator is not jailed @@ -2033,54 +2023,548 @@ where storage, ¶ms, validator, - -amount_after_slashing, - current_epoch, + change_after_slashing, + pipeline_epoch, )?; } // Update the validator and total deltas at the pipeline offset update_validator_deltas( storage, - ¶ms, validator, - -amount_after_slashing, + change_after_slashing, current_epoch, params.pipeline_len, )?; update_total_deltas( storage, - ¶ms, - -amount_after_slashing, + change_after_slashing, current_epoch, params.pipeline_len, )?; - Ok(()) + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after decrementing: {bonds:#?}"); + } + + // Invariant: in the affected epochs, the delta of bonds must be >= delta of + // redelegated bonds deltas sum + #[cfg(debug_assertions)] + { + let mut epochs = bonds_to_unbond.epochs.clone(); + if let Some((epoch, _)) = bonds_to_unbond.new_entry { + epochs.insert(epoch); + } + for epoch in epochs { + let cur_bond = bonds_handle + .get_delta_val(storage, epoch)? + .unwrap_or_default(); + let redelegated_deltas = redelegated_bonds + .at(&epoch) + // Sum of redelegations from any src validator + .collect_map(storage)? + .into_values() + .map(|redeleg| redeleg.into_values().sum()) + .sum(); + debug_assert!( + cur_bond >= redelegated_deltas, + "After unbonding, in epoch {epoch} the bond amount {} must be \ + >= redelegated deltas at pipeline {}.\n\nredelegated_bonds \ + pre: {redel_bonds_pre:#?}\nredelegated_bonds post: \ + {redel_bonds_post:#?},\nmodified_redelegation: \ + {modified_redelegation:#?},\nbonds_to_unbond: \ + {bonds_to_unbond:#?}", + cur_bond.to_string_native(), + redelegated_deltas.to_string_native() + ); + } + } + + Ok(result_slashing) } -/// Compute a token amount after slashing, given the initial amount and a set of -/// slashes. 
It is assumed that the input `slashes` are those commited while the -/// `amount` was contributing to voting power. -fn get_slashed_amount( +#[derive(Debug, Default, Eq, PartialEq)] +struct FoldRedelegatedBondsResult { + total_redelegated: token::Amount, + total_after_slashing: token::Amount, +} + +/// Iterates over a `redelegated_unbonds` and computes the both the sum of all +/// redelegated tokens and how much is left after applying all relevant slashes. +// `def foldAndSlashRedelegatedBondsMap` +fn fold_and_slash_redelegated_bonds( + storage: &S, params: &PosParams, - amount: token::Amount, - slashes: &BTreeMap, -) -> storage_api::Result { - // println!("FN `get_slashed_amount`"); + redelegated_unbonds: &EagerRedelegatedBondsMap, + start_epoch: Epoch, + list_slashes: &[Slash], + slash_epoch_filter: impl Fn(Epoch) -> bool, +) -> FoldRedelegatedBondsResult +where + S: StorageRead, +{ + let mut result = FoldRedelegatedBondsResult::default(); + for (src_validator, bonds_map) in redelegated_unbonds { + for (bond_start, &change) in bonds_map { + // Merge the two lists of slashes + let mut merged: Vec = + // Look-up slashes for this validator ... + validator_slashes_handle(src_validator) + .iter(storage) + .unwrap() + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end( + start_epoch, + ), + start_epoch, + ) && *bond_start <= slash.epoch + && slash_epoch_filter(slash.epoch) + }) + // ... and add `list_slashes` + .chain(list_slashes.iter().cloned()) + .collect(); + + // Sort slashes by epoch + merged.sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); + + result.total_redelegated += change; + result.total_after_slashing += + apply_list_slashes(params, &merged, change); + } + } + result +} - let mut updated_amount = amount; - let mut computed_amounts = Vec::::new(); +/// Computes how much remains from an amount of tokens after applying a list of +/// slashes. 
+/// +/// - `slashes` - a list of slashes ordered by misbehaving epoch. +/// - `amount` - the amount of slashable tokens. +// `def applyListSlashes` +fn apply_list_slashes( + params: &PosParams, + slashes: &[Slash], + amount: token::Amount, +) -> token::Amount { + let mut final_amount = amount; + let mut computed_slashes = BTreeMap::::new(); + for slash in slashes { + let slashed_amount = + compute_slashable_amount(params, slash, amount, &computed_slashes); + final_amount = + final_amount.checked_sub(slashed_amount).unwrap_or_default(); + computed_slashes.insert(slash.epoch, slashed_amount); + } + final_amount +} + +/// Computes how much is left from a bond or unbond after applying a slash given +/// that a set of slashes may have been previously applied. +// `def computeSlashableAmount` +fn compute_slashable_amount( + params: &PosParams, + slash: &Slash, + amount: token::Amount, + computed_slashes: &BTreeMap, +) -> token::Amount { + let updated_amount = computed_slashes + .iter() + .filter(|(&epoch, _)| { + // Keep slashes that have been applied and processed before the + // current slash occurred. We use `<=` because slashes processed at + // `slash.epoch` (at the start of the epoch) are also processed + // before this slash occurred. + epoch + params.slash_processing_epoch_offset() <= slash.epoch + }) + .fold(amount, |acc, (_, &amnt)| { + acc.checked_sub(amnt).unwrap_or_default() + }); + updated_amount.mul_ceil(slash.rate) +} + +/// Epochs for full and partial unbonds. +#[derive(Debug, Default)] +struct BondsForRemovalRes { + /// Full unbond epochs + pub epochs: BTreeSet, + /// Partial unbond epoch associated with the new bond amount + pub new_entry: Option<(Epoch, token::Amount)>, +} + +/// In decreasing epoch order, decrement the non-zero bond amount entries until +/// the full `amount` has been removed. 
Returns a `BondsForRemovalRes` object +/// that contains the epochs for which the full bond amount is removed and +/// additionally information for the one epoch whose bond amount is partially +/// removed, if any. +fn find_bonds_to_remove( + storage: &S, + bonds_handle: &LazyMap, + amount: token::Amount, +) -> storage_api::Result +where + S: StorageRead, +{ + #[allow(clippy::needless_collect)] + let bonds: Vec> = bonds_handle.iter(storage)?.collect(); + + let mut bonds_for_removal = BondsForRemovalRes::default(); + let mut remaining = amount; + + for bond in bonds.into_iter().rev() { + let (bond_epoch, bond_amount) = bond?; + let to_unbond = cmp::min(bond_amount, remaining); + if to_unbond == bond_amount { + bonds_for_removal.epochs.insert(bond_epoch); + } else { + bonds_for_removal.new_entry = + Some((bond_epoch, bond_amount - to_unbond)); + } + remaining -= to_unbond; + if remaining.is_zero() { + break; + } + } + Ok(bonds_for_removal) +} + +#[derive(Debug, Default, PartialEq, Eq)] +struct ModifiedRedelegation { + epoch: Option, + validators_to_remove: BTreeSet
, + validator_to_modify: Option
, + epochs_to_remove: BTreeSet, + epoch_to_modify: Option, + new_amount: Option, +} + +/// Used in `fn unbond_tokens` to compute the modified state of a redelegation +/// if redelegated tokens are being unbonded. +fn compute_modified_redelegation( + storage: &S, + redelegated_bonds: &RedelegatedTokens, + start_epoch: Epoch, + amount_to_unbond: token::Amount, +) -> storage_api::Result +where + S: StorageRead, +{ + let mut modified_redelegation = ModifiedRedelegation::default(); + + let mut src_validators = BTreeSet::
::new(); + let mut total_redelegated = token::Amount::zero(); + for rb in redelegated_bonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: src_validator, + nested_sub_key: _, + }, + amount, + ) = rb?; + total_redelegated += amount; + src_validators.insert(src_validator); + } + + modified_redelegation.epoch = Some(start_epoch); + + // If the total amount of redelegated bonds is less than the target amount, + // then all redelegated bonds must be unbonded. + if total_redelegated <= amount_to_unbond { + return Ok(modified_redelegation); + } + + let mut remaining = amount_to_unbond; + for src_validator in src_validators.into_iter() { + if remaining.is_zero() { + break; + } + let rbonds = redelegated_bonds.at(&src_validator); + let total_src_val_amount = rbonds + .iter(storage)? + .map(|res| { + let (_, amount) = res?; + Ok(amount) + }) + .sum::>()?; + + // TODO: move this into the `if total_redelegated <= remaining` branch + // below, then we don't have to remove it in `fn + // update_redelegated_bonds` when `validator_to_modify` is Some (and + // avoid `modified_redelegation.validators_to_remove.clone()`). + // It affects assumption 2. in `fn compute_new_redelegated_unbonds`, but + // that looks trivial to change. + // NOTE: not sure if this TODO is still relevant... 
+ modified_redelegation + .validators_to_remove + .insert(src_validator.clone()); + if total_src_val_amount <= remaining { + remaining -= total_src_val_amount; + } else { + let bonds_to_remove = + find_bonds_to_remove(storage, &rbonds, remaining)?; + + remaining = token::Amount::zero(); + + // NOTE: When there are multiple `src_validators` from which we're + // unbonding, `validator_to_modify` cannot get overriden, because + // only one of them can be a partial unbond (`new_entry` + // is partial unbond) + if let Some((bond_epoch, new_bond_amount)) = + bonds_to_remove.new_entry + { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = { + let mut epochs = bonds_to_remove.epochs; + // TODO: remove this insertion then we don't have to remove + // it again in `fn update_redelegated_bonds` + // when `epoch_to_modify` is Some (and avoid + // `modified_redelegation.epochs_to_remove.clone`) + // It affects assumption 3. in `fn + // compute_new_redelegated_unbonds`, but that also looks + // trivial to change. 
+ epochs.insert(bond_epoch); + epochs + }; + modified_redelegation.epoch_to_modify = Some(bond_epoch); + modified_redelegation.new_amount = Some(new_bond_amount); + } else { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = bonds_to_remove.epochs; + } + } + } + Ok(modified_redelegation) +} + +fn update_redelegated_bonds( + storage: &mut S, + redelegated_bonds: &RedelegatedTokens, + modified_redelegation: &ModifiedRedelegation, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + if let Some(val_to_modify) = &modified_redelegation.validator_to_modify { + let mut updated_vals_to_remove = + modified_redelegation.validators_to_remove.clone(); + updated_vals_to_remove.remove(val_to_modify); + + // Remove the updated_vals_to_remove keys from the + // redelegated_bonds map + for val in &updated_vals_to_remove { + redelegated_bonds.remove_all(storage, val)?; + } + + if let Some(epoch_to_modify) = modified_redelegation.epoch_to_modify { + let mut updated_epochs_to_remove = + modified_redelegation.epochs_to_remove.clone(); + updated_epochs_to_remove.remove(&epoch_to_modify); + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in updated_epochs_to_remove { + val_bonds_to_modify.remove(storage, &epoch)?; + } + val_bonds_to_modify.insert( + storage, + epoch_to_modify, + modified_redelegation.new_amount.unwrap(), + )?; + } else { + // Then remove to epochs_to_remove from the redelegated bonds of the + // val_to_modify + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in &modified_redelegation.epochs_to_remove { + val_bonds_to_modify.remove(storage, epoch)?; + } + } + } else { + // Remove all validators in modified_redelegation.validators_to_remove + // from redelegated_bonds + for val in &modified_redelegation.validators_to_remove { + redelegated_bonds.remove_all(storage, val)?; + } + } + Ok(()) +} + +/// Temp helper type to match quint model. 
+/// Result of `compute_new_redelegated_unbonds` that contains a map of +/// redelegated unbonds. +/// The map keys from outside in are: +/// +/// - redelegation end epoch where redeleg stops contributing to src validator +/// - src validator address +/// - src bond start epoch where it started contributing to src validator +// TODO: refactor out +type EagerRedelegatedUnbonds = BTreeMap; + +/// Computes a map of redelegated unbonds from a set of redelegated bonds. +/// +/// - `redelegated_bonds` - a map of redelegated bonds from epoch to +/// `RedelegatedTokens`. +/// - `epochs_to_remove` - a set of epochs that indicate the set of epochs +/// unbonded. +/// - `modified` record that represents a redelegated bond that it is only +/// partially unbonded. +/// +/// The function assumes that: +/// +/// 1. `modified.epoch` is not in the `epochs_to_remove` set. +/// 2. `modified.validator_to_modify` is in `modified.vals_to_remove`. +/// 3. `modified.epoch_to_modify` is in in `modified.epochs_to_remove`. +// TODO: try to optimize this by only writing to storage via Lazy! +// `def computeNewRedelegatedUnbonds` from Quint +fn compute_new_redelegated_unbonds( + storage: &S, + redelegated_bonds: &RedelegatedBondsOrUnbonds, + epochs_to_remove: &BTreeSet, + modified: &ModifiedRedelegation, +) -> storage_api::Result +where + S: StorageRead + StorageWrite, +{ + let unbonded_epochs = if let Some(epoch) = modified.epoch { + debug_assert!( + !epochs_to_remove.contains(&epoch), + "1. 
assumption in `fn compute_new_redelegated_unbonds` doesn't \ + hold" + ); + let mut epochs = epochs_to_remove.clone(); + epochs.insert(epoch); + epochs + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + } else { + epochs_to_remove + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + }; + debug_assert!( + modified + .validator_to_modify + .as_ref() + .map(|validator| modified.validators_to_remove.contains(validator)) + .unwrap_or(true), + "2. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" + ); + debug_assert!( + modified + .epoch_to_modify + .as_ref() + .map(|epoch| modified.epochs_to_remove.contains(epoch)) + .unwrap_or(true), + "3. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" + ); - for (infraction_epoch, slash_rate) in slashes { - // println!("Slash epoch: {}, rate: {}", infraction_epoch, slash_rate); + // quint `newRedelegatedUnbonds` returned from + // `computeNewRedelegatedUnbonds` + let new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs + .into_iter() + .map(|start| { + let mut rbonds = EagerRedelegatedBondsMap::default(); + if modified + .epoch + .map(|redelegation_epoch| start != redelegation_epoch) + .unwrap_or(true) + || modified.validators_to_remove.is_empty() + { + for res in redelegated_bonds.at(&start).iter(storage).unwrap() { + let ( + NestedSubKey::Data { + key: validator, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = res.unwrap(); + rbonds + .entry(validator.clone()) + .or_default() + .insert(epoch, amount); + } + (start, rbonds) + } else { + for src_validator in &modified.validators_to_remove { + if modified + .validator_to_modify + .as_ref() + .map(|validator| src_validator != validator) + .unwrap_or(true) + { + let raw_bonds = + redelegated_bonds.at(&start).at(src_validator); + for res in raw_bonds.iter(storage).unwrap() { + let (bond_epoch, bond_amount) = res.unwrap(); + rbonds 
+ .entry(src_validator.clone()) + .or_default() + .insert(bond_epoch, bond_amount); + } + } else { + for bond_start in &modified.epochs_to_remove { + let cur_redel_bond_amount = redelegated_bonds + .at(&start) + .at(src_validator) + .get(storage, bond_start) + .unwrap() + .unwrap_or_default(); + let raw_bonds = rbonds + .entry(src_validator.clone()) + .or_default(); + if modified + .epoch_to_modify + .as_ref() + .map(|epoch| bond_start != epoch) + .unwrap_or(true) + { + raw_bonds + .insert(*bond_start, cur_redel_bond_amount); + } else { + raw_bonds.insert( + *bond_start, + cur_redel_bond_amount + - modified + .new_amount + // Safe unwrap - it shouldn't + // get to + // this if it's None + .unwrap(), + ); + } + } + } + } + (start, rbonds) + } + }) + .collect(); + + Ok(new_redelegated_unbonds) +} + +/// Compute a token amount after slashing, given the initial amount and a set of +/// slashes. It is assumed that the input `slashes` are those commited while the +/// `amount` was contributing to voting power. +fn get_slashed_amount( + params: &PosParams, + amount: token::Amount, + slashes: &BTreeMap, +) -> storage_api::Result { + let mut updated_amount = amount; + let mut computed_amounts = Vec::::new(); + + for (&infraction_epoch, &slash_rate) in slashes { let mut computed_to_remove = BTreeSet::>::new(); for (ix, slashed_amount) in computed_amounts.iter().enumerate() { // Update amount with slashes that happened more than unbonding_len // epochs before this current slash - // TODO: understand this better (from Informal) - // TODO: do bounds of this need to be changed with a +/- 1?? 
if slashed_amount.epoch + params.slash_processing_epoch_offset() - <= *infraction_epoch + <= infraction_epoch { updated_amount = updated_amount .checked_sub(slashed_amount.amount) @@ -2095,13 +2579,10 @@ fn get_slashed_amount( computed_amounts.remove(item.0); } computed_amounts.push(SlashedAmount { - amount: *slash_rate * updated_amount, - epoch: *infraction_epoch, + amount: updated_amount.mul_ceil(slash_rate), + epoch: infraction_epoch, }); } - // println!("Finished loop over slashes in `get_slashed_amount`"); - // println!("Updated amount: {:?}", &updated_amount); - // println!("Computed amounts: {:?}", &computed_amounts); let total_computed_amounts = computed_amounts .into_iter() @@ -2112,29 +2593,126 @@ fn get_slashed_amount( .checked_sub(total_computed_amounts) .unwrap_or_default(); - Ok(final_amount.change()) + Ok(final_amount) } -fn update_unbond( - handle: &Unbonds, - storage: &mut S, - withdraw_epoch: &Epoch, - start_epoch: &Epoch, - amount: token::Amount, -) -> storage_api::Result<()> +// `def computeAmountAfterSlashingUnbond` +fn compute_amount_after_slashing_unbond( + storage: &S, + params: &PosParams, + unbonds: &BTreeMap, + redelegated_unbonds: &EagerRedelegatedUnbonds, + slashes: Vec, +) -> storage_api::Result where - S: StorageRead + StorageWrite, + S: StorageRead, { - let current = handle - .at(withdraw_epoch) - .get(storage, start_epoch)? 
- .unwrap_or_default(); - handle.at(withdraw_epoch).insert( - storage, - *start_epoch, - current + amount, - )?; - Ok(()) + let mut result_slashing = ResultSlashing::default(); + for (&start_epoch, amount) in unbonds { + // `val listSlashes` + let list_slashes: Vec = slashes + .iter() + .filter(|slash| slash.epoch >= start_epoch) + .cloned() + .collect(); + // `val resultFold` + let result_fold = if let Some(redelegated_unbonds) = + redelegated_unbonds.get(&start_epoch) + { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + start_epoch, + &list_slashes, + |_| true, + ) + } else { + FoldRedelegatedBondsResult::default() + }; + // `val totalNoRedelegated` + let total_not_redelegated = amount + .checked_sub(result_fold.total_redelegated) + .unwrap_or_default(); + // `val afterNoRedelegated` + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + // `val amountAfterSlashing` + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + // Accumulation step + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(start_epoch, amount_after_slashing); + } + Ok(result_slashing) +} + +/// Compute from a set of unbonds (both redelegated and not) how much is left +/// after applying all relevant slashes. 
+// `def computeAmountAfterSlashingWithdraw` +fn compute_amount_after_slashing_withdraw( + storage: &S, + params: &PosParams, + unbonds_and_redelegated_unbonds: &BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + >, + slashes: Vec, +) -> storage_api::Result +where + S: StorageRead, +{ + let mut result_slashing = ResultSlashing::default(); + + for ((start_epoch, withdraw_epoch), (amount, redelegated_unbonds)) in + unbonds_and_redelegated_unbonds.iter() + { + // TODO: check if slashes in the same epoch can be + // folded into one effective slash + let end_epoch = *withdraw_epoch + - params.unbonding_len + - params.cubic_slashing_window_length; + // Find slashes that apply to `start_epoch..end_epoch` + let list_slashes = slashes + .iter() + .filter(|slash| { + // Started before the slash occurred + start_epoch <= &slash.epoch + // Ends after the slash + && end_epoch > slash.epoch + }) + .cloned() + .collect::>(); + + // Find the sum and the sum after slashing of the redelegated unbonds + let result_fold = fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + *start_epoch, + &list_slashes, + |_| true, + ); + + // Unbond amount that didn't come from a redelegation + let total_not_redelegated = *amount - result_fold.total_redelegated; + // Find how much remains after slashing non-redelegated amount + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + // Add back the unbond and redelegated unbond amount after slashing + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(*start_epoch, amount_after_slashing); + } + + Ok(result_slashing) } /// Arguments to [`become_validator`]. 
@@ -2221,7 +2799,7 @@ where )?; validator_deltas_handle(address).set( storage, - token::Change::default(), + token::Change::zero(), current_epoch, params.pipeline_len, )?; @@ -2248,12 +2826,17 @@ pub fn withdraw_tokens( where S: StorageRead + StorageWrite, { - tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); let params = read_pos_params(storage)?; let source = source.unwrap_or(validator); + + tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); tracing::debug!("Source {} --> Validator {}", source, validator); - let unbond_handle = unbond_handle(source, validator); + let unbond_handle: Unbonds = unbond_handle(source, validator); + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(source).at(validator); + + // Check that there are unbonded tokens available for withdrawal if unbond_handle.is_empty(storage)? { return Err(WithdrawError::NoUnbondFound(BondId { source: source.clone(), @@ -2262,84 +2845,109 @@ where .into()); } - // let mut total_slashed = token::Amount::default(); - let mut withdrawable_amount = token::Amount::default(); - // (withdraw_epoch, start_epoch) - let mut unbonds_to_remove: Vec<(Epoch, Epoch)> = Vec::new(); + let mut unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::new(); for unbond in unbond_handle.iter(storage)? 
{ let ( NestedSubKey::Data { - key: withdraw_epoch, - nested_sub_key: SubKey::Data(start_epoch), + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), }, amount, ) = unbond?; + // Logging tracing::debug!( "Unbond delta ({start_epoch}..{withdraw_epoch}), amount {}", amount.to_string_native() ); - - // TODO: adding slash rates in same epoch, applying cumulatively in dif - // epochs if withdraw_epoch > current_epoch { tracing::debug!( "Not yet withdrawable until epoch {withdraw_epoch}" ); continue; } - let slashes_for_this_unbond = find_slashes_in_range( - storage, - start_epoch, - Some( - withdraw_epoch - - params.unbonding_len - - params.cubic_slashing_window_length, - ), - validator, - )?; - let amount_after_slashing = - get_slashed_amount(¶ms, amount, &slashes_for_this_unbond)?; + let mut eager_redelegated_unbonds = EagerRedelegatedBondsMap::default(); + let matching_redelegated_unbonds = + redelegated_unbonds.at(&start_epoch).at(&withdraw_epoch); + for ub in matching_redelegated_unbonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: address, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = ub?; + eager_redelegated_unbonds + .entry(address) + .or_default() + .entry(epoch) + .or_insert(amount); + } - // total_slashed += amount - token::Amount::from(amount_after_slashing); - withdrawable_amount += token::Amount::from(amount_after_slashing); - unbonds_to_remove.push((withdraw_epoch, start_epoch)); + unbonds_and_redelegated_unbonds.insert( + (start_epoch, withdraw_epoch), + (amount, eager_redelegated_unbonds), + ); } + let slashes = find_validator_slashes(storage, validator)?; + + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_withdraw( + storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + )?; + + let withdrawable_amount = result_slashing.sum; tracing::debug!( "Withdrawing total {}", withdrawable_amount.to_string_native() ); - // Remove the unbond data from storage - for (withdraw_epoch, start_epoch) in unbonds_to_remove { + // `updateDelegator` with `unbonded` and `redelegeatedUnbonded` + for ((start_epoch, withdraw_epoch), _unbond_and_redelegations) in + unbonds_and_redelegated_unbonds + { tracing::debug!("Remove ({start_epoch}..{withdraw_epoch}) from unbond"); unbond_handle - .at(&withdraw_epoch) - .remove(storage, &start_epoch)?; - // TODO: check if the `end_epoch` layer is now empty and remove it if - // so, may need to implement remove/delete for nested map + .at(&start_epoch) + .remove(storage, &withdraw_epoch)?; + redelegated_unbonds + .at(&start_epoch) + .remove_all(storage, &withdraw_epoch)?; + + if unbond_handle.at(&start_epoch).is_empty(storage)? { + unbond_handle.remove_all(storage, &start_epoch)?; + } + if redelegated_unbonds.at(&start_epoch).is_empty(storage)? 
{ + redelegated_unbonds.remove_all(storage, &start_epoch)?; + } } // Transfer the withdrawable tokens from the PoS address back to the source let staking_token = staking_token_address(storage); - transfer_tokens( + token::transfer( storage, &staking_token, - withdrawable_amount, &ADDRESS, source, + withdrawable_amount, )?; // TODO: Transfer the slashed tokens from the PoS address to the Slash Pool // address - // transfer_tokens( + // token::transfer( // storage, // &staking_token, - // total_slashed, // &ADDRESS, // &SLASH_POOL_ADDRESS, + // total_slashed, // )?; Ok(withdrawable_amount) @@ -2405,53 +3013,6 @@ where commission_handle.set(storage, new_rate, current_epoch, params.pipeline_len) } -/// Transfer tokens between accounts -/// TODO: may want to move this into core crate -pub fn transfer_tokens( - storage: &mut S, - token: &Address, - amount: token::Amount, - src: &Address, - dest: &Address, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let src_key = token::balance_key(token, src); - let dest_key = token::balance_key(token, dest); - if let Some(mut src_balance) = storage.read::(&src_key)? { - // let mut src_balance: token::Amount = - // decode(src_balance).unwrap_or_default(); - if src_balance < amount { - tracing::error!( - "PoS system transfer error, the source doesn't have \ - sufficient balance. It has {}, but {} is required", - src_balance.to_string_native(), - amount.to_string_native(), - ); - } - src_balance.spend(&amount); - let mut dest_balance = storage - .read::(&dest_key)? 
- .unwrap_or_default(); - - // let dest_balance = storage.read_bytes(&dest_key).unwrap_or_default(); - // let mut dest_balance: token::Amount = dest_balance - // .and_then(|b| decode(b).ok()) - // .unwrap_or_default(); - dest_balance.receive(&amount); - storage - .write(&src_key, src_balance) - .expect("Unable to write token balance for PoS system"); - storage - .write(&dest_key, dest_balance) - .expect("Unable to write token balance for PoS system"); - } else { - tracing::error!("PoS system transfer error, the source has no balance"); - } - Ok(()) -} - /// Check if the given consensus key is already being used to ensure uniqueness. /// /// If it's not being used, it will be inserted into the set that's being used @@ -2481,54 +3042,273 @@ where } /// Get the total bond amount, including slashes, for a given bond ID and epoch. -/// Returns a two-element tuple of the raw bond amount and the post-slashed bond -/// amount, respectively. -/// -/// TODO: does epoch of discovery need to be considered for precise accuracy? +/// Returns the bond amount after slashing. For future epochs the value is +/// subject to change. pub fn bond_amount( storage: &S, bond_id: &BondId, epoch: Epoch, -) -> storage_api::Result<(token::Amount, token::Amount)> +) -> storage_api::Result where S: StorageRead, { - // TODO: review this logic carefully, apply rewards + // TODO: our method of applying slashes is not correct! 
This needs review + + println!("FN BOND AMOUNT"); + let params = read_pos_params(storage)?; + + // TODO: apply rewards let slashes = find_validator_slashes(storage, &bond_id.validator)?; - let slash_rates = slashes.into_iter().fold( - BTreeMap::::new(), - |mut map, slash| { - let tot_rate = map.entry(slash.epoch).or_default(); - *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); - map - }, - ); + dbg!(&slashes); + let slash_rates = + slashes + .iter() + .fold(BTreeMap::::new(), |mut map, slash| { + let tot_rate = map.entry(slash.epoch).or_default(); + *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); + map + }); + dbg!(&slash_rates); + + // Accumulate incoming redelegations slashes from source validator, if any. + // This ensures that if there're slashes on both src validator and dest + // validator, they're combined correctly. + let mut redelegation_slashes = BTreeMap::::new(); + for res in delegator_redelegated_bonds_handle(&bond_id.source) + .at(&bond_id.validator) + .iter(storage)? + { + let ( + NestedSubKey::Data { + key: redelegation_end, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + delta, + ) = res?; + + let list_slashes = validator_slashes_handle(&src_validator) + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|slash| { + let slash_processing_epoch = + slash.epoch + params.slash_processing_epoch_offset(); + start <= slash.epoch + && redelegation_end > slash.epoch + && slash_processing_epoch + > redelegation_end - params.pipeline_len + }) + .collect::>(); + + let slashed_delta = apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // let slashes = find_slashes_in_range( + // storage, + // start, + // Some(redelegation_end), + // &src_validator, + // )?; + // for (slash_epoch, rate) in slashes { + // let slash_processing_epoch = + // slash_epoch + params.slash_processing_epoch_offset(); + // // If the slash was processed after redelegation was submitted + // // it has to be slashed now + // if slash_processing_epoch > redelegation_end - + // params.pipeline_len { let slashed = + // slashed_delta.mul_ceil(rate); slashed_delta -= + // slashed; } + // } + *redelegation_slashes.entry(redelegation_end).or_default() += + delta - slashed_delta; + } + dbg!(&redelegation_slashes); let bonds = bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); - let mut total = token::Amount::default(); - let mut total_active = token::Amount::default(); + let mut total_active = token::Amount::zero(); for next in bonds.iter(storage)? 
{ - let (bond_epoch, delta) = next?; + let (bond_epoch, delta) = dbg!(next?); if bond_epoch > epoch { continue; } - total += token::Amount::from(delta); - total_active += token::Amount::from(delta); + let list_slashes = slashes + .iter() + .filter(|slash| bond_epoch <= slash.epoch) + .cloned() + .collect::>(); + + let mut slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // Deduct redelegation src validator slash, if any + if let Some(&redelegation_slash) = redelegation_slashes.get(&bond_epoch) + { + slashed_delta -= redelegation_slash; + } + + // let list_slashes = slashes + // .iter() + // .map(Result::unwrap) + // .filter(|slash| bond_epoch <= slash.epoch) + // .collect::>(); + + // for (&slash_epoch, &rate) in &slash_rates { + // if slash_epoch < bond_epoch { + // continue; + // } + // // TODO: think about truncation + // let current_slash = slashed_delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + total_active += slashed_delta; + } + dbg!(&total_active); + + // Add unbonds that are still contributing to stake + let unbonds = unbond_handle(&bond_id.source, &bond_id.validator); + for next in unbonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: start, + nested_sub_key: SubKey::Data(withdrawable_epoch), + }, + delta, + ) = next?; + let end = withdrawable_epoch - params.withdrawable_epoch_offset() + + params.pipeline_len; + + if start <= epoch && end > epoch { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // // TODO: think about truncation + // let current_slash = slashed_delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; + } + } + dbg!(&total_active); + + if bond_id.validator != bond_id.source { + // Add outgoing redelegations that are still contributing to the source + // validator's stake + let redelegated_bonds = + delegator_redelegated_bonds_handle(&bond_id.source); + for res in redelegated_bonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: end, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + delta, + ) = res?; + if src_validator == bond_id.validator + && start <= epoch + && end > epoch + { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // // TODO: think about truncation + // let current_slash = delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; + } + } + dbg!(&total_active); - for (slash_epoch, rate) in &slash_rates { - if *slash_epoch < bond_epoch { - continue; + // Add outgoing redelegation unbonds that are still contributing to + // the source validator's stake + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(&bond_id.source); + for res in redelegated_unbonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: redelegation_epoch, + nested_sub_key: + NestedSubKey::Data { + key: withdraw_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + }, + delta, + ) = res?; + let end = withdraw_epoch - params.withdrawable_epoch_offset() + + params.pipeline_len; + if src_validator == bond_id.validator + // If the unbonded bond was redelegated after this epoch ... + && redelegation_epoch > epoch + // ... the start was before or at this epoch ... + && start <= epoch + // ... 
and the end after this epoch + && end > epoch + { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // let current_slash = delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; } - // TODO: think about truncation - let current_slashed = *rate * delta; - total_active - .checked_sub(token::Amount::from(current_slashed)) - .unwrap_or_default(); } } - Ok((total, total_active)) + dbg!(&total_active); + + Ok(total_active) } /// Get the genesis consensus validators stake and consensus key for Tendermint, @@ -2618,8 +3398,7 @@ where &address, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); into_tm_voting_power( params.tm_votes_per_token, prev_validator_stake, @@ -2642,7 +3421,7 @@ where } // If both previous and current voting powers are 0, and the // validator_stake_threshold is 0, skip update - if params.validator_stake_threshold == token::Amount::default() + if params.validator_stake_threshold.is_zero() && *prev_tm_voting_power == 0 && *new_tm_voting_power == 0 { @@ -2690,8 +3469,7 @@ where &address, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); into_tm_voting_power( params.tm_votes_per_token, prev_validator_stake, @@ -2702,8 +3480,7 @@ where // it in the `new_consensus_validators` iterator above if matches!(new_state, Some(ValidatorState::Consensus)) { return None; - } else if params.validator_stake_threshold - == token::Amount::default() + } else if params.validator_stake_threshold.is_zero() && *prev_tm_voting_power == 0 { // If the new state is not Consensus but its prev voting power @@ -2782,11 +3559,10 @@ where "Delegation key should contain validator address.", ) })?; - let amount = 
bond_handle(owner, &validator_address) + let deltas_sum = bond_handle(owner, &validator_address) .get_sum(storage, *epoch, ¶ms)? .unwrap_or_default(); - delegations - .insert(validator_address, token::Amount::from_change(amount)); + delegations.insert(validator_address, deltas_sum); } Ok(delegations) } @@ -2807,7 +3583,7 @@ pub fn find_bonds( storage: &S, source: &Address, validator: &Address, -) -> storage_api::Result> +) -> storage_api::Result> where S: StorageRead, { @@ -2831,8 +3607,8 @@ where .map(|next_result| { let ( NestedSubKey::Data { - key: withdraw_epoch, - nested_sub_key: SubKey::Data(start_epoch), + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), }, amount, ) = next_result?; @@ -2976,7 +3752,7 @@ where { return None; } - let change: token::Change = + let change: token::Amount = BorshDeserialize::try_from_slice(&val_bytes).ok()?; if change.is_zero() { return None; @@ -3101,12 +3877,12 @@ where let bonds = find_bonds(storage, &source, &validator)? .into_iter() - .filter(|(_start, change)| *change > token::Change::default()) - .map(|(start, change)| { + .filter(|(_start, amount)| *amount > token::Amount::zero()) + .map(|(start, amount)| { make_bond_details( params, &validator, - change, + amount, start, &slashes, &mut applied_slashes, @@ -3140,7 +3916,7 @@ where fn make_bond_details( params: &PosParams, validator: &Address, - change: token::Change, + deltas_sum: token::Amount, start: Epoch, slashes: &[Slash], applied_slashes: &mut HashMap>, @@ -3150,7 +3926,7 @@ fn make_bond_details( .get(validator) .cloned() .unwrap_or_default(); - let amount = token::Amount::from_change(change); + let mut slash_rates_by_epoch = BTreeMap::::new(); let validator_slashes = @@ -3169,15 +3945,15 @@ fn make_bond_details( let slashed_amount = if slash_rates_by_epoch.is_empty() { None } else { - let amount_after_slashing = token::Amount::from_change( - get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(), - ); - Some(amount - 
amount_after_slashing) + let amount_after_slashing = + get_slashed_amount(params, deltas_sum, &slash_rates_by_epoch) + .unwrap(); + Some(deltas_sum - amount_after_slashing) }; BondDetails { start, - amount, + amount: deltas_sum, slashed_amount, } } @@ -3221,9 +3997,8 @@ fn make_unbond_details( let slashed_amount = if slash_rates_by_epoch.is_empty() { None } else { - let amount_after_slashing = token::Amount::from_change( - get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(), - ); + let amount_after_slashing = + get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(); Some(amount - amount_after_slashing) }; @@ -3255,7 +4030,7 @@ where let consensus_validators = consensus_validator_set_handle().at(&epoch); // Get total stake of the consensus validator set - let mut total_consensus_stake = token::Amount::default(); + let mut total_consensus_stake = token::Amount::zero(); for validator in consensus_validators.iter(storage)? { let ( NestedSubKey::Data { @@ -3270,7 +4045,7 @@ where // Get set of signing validator addresses and the combined stake of // these signers let mut signer_set: HashSet
= HashSet::new(); - let mut total_signing_stake = token::Amount::default(); + let mut total_signing_stake = token::Amount::zero(); for VoteInfo { validator_address, validator_vp, @@ -3291,8 +4066,7 @@ where } let stake_from_deltas = - read_validator_stake(storage, ¶ms, &validator_address, epoch)? - .unwrap_or_default(); + read_validator_stake(storage, ¶ms, &validator_address, epoch)?; // Ensure TM stake updates properly with a debug_assert if cfg!(debug_assertions) { @@ -3325,7 +4099,7 @@ where "PoS rewards coefficients {coeffs:?}, inputs: {rewards_calculator:?}." ); - // println!( + // tracing::debug!( // "TOTAL SIGNING STAKE (LOGGING BLOCK REWARDS) = {}", // signing_stake // ); @@ -3348,13 +4122,13 @@ where // When below-threshold validator set is added, this shouldn't be needed // anymore since some minimal stake will be required to be in at least // the consensus set - if stake == token::Amount::default() { + if stake.is_zero() { continue; } let mut rewards_frac = Dec::zero(); let stake_unscaled: Dec = stake.into(); - // println!( + // tracing::debug!( // "NAMADA VALIDATOR STAKE (LOGGING BLOCK REWARDS) OF EPOCH {} = // {}", epoch, stake // ); @@ -3396,7 +4170,7 @@ pub fn compute_cubic_slash_rate( where S: StorageRead, { - // println!("COMPUTING CUBIC SLASH RATE"); + // tracing::debug!("COMPUTING CUBIC SLASH RATE"); let mut sum_vp_fraction = Dec::zero(); let (start_epoch, end_epoch) = params.cubic_slash_epoch_window(infraction_epoch); @@ -3424,9 +4198,9 @@ where ) = res?; let validator_stake = - read_validator_stake(storage, params, &validator, epoch)? 
- .unwrap_or_default(); - // println!("Val {} stake: {}", &validator, validator_stake); + read_validator_stake(storage, params, &validator, epoch)?; + // tracing::debug!("Val {} stake: {}", &validator, + // validator_stake); Ok(acc + Dec::from(validator_stake)) // TODO: does something more complex need to be done @@ -3436,7 +4210,7 @@ where )?; sum_vp_fraction += infracting_stake / consensus_stake; } - // println!("sum_vp_fraction: {}", sum_vp_fraction); + // tracing::debug!("sum_vp_fraction: {}", sum_vp_fraction); Ok(Dec::new(9, 0).unwrap() * sum_vp_fraction * sum_vp_fraction) } @@ -3495,16 +4269,15 @@ where .expect("Expected to find a valid validator."); match prev_state { ValidatorState::Consensus => { - let amount_pre = validator_deltas_handle(validator) - .get_sum(storage, epoch, params)? - .unwrap_or_default(); + let amount_pre = + read_validator_stake(storage, params, validator, epoch)?; let val_position = validator_set_positions_handle() .at(&epoch) .get(storage, validator)? .expect("Could not find validator's position in storage."); let _ = consensus_validator_set_handle() .at(&epoch) - .at(&token::Amount::from_change(amount_pre)) + .at(&amount_pre) .remove(storage, &val_position)?; validator_set_positions_handle() .at(&epoch) @@ -3557,6 +4330,7 @@ where let amount_pre = validator_deltas_handle(validator) .get_sum(storage, epoch, params)? .unwrap_or_default(); + debug_assert!(amount_pre.non_negative()); let val_position = validator_set_positions_handle() .at(&epoch) .get(storage, validator)? @@ -3570,10 +4344,10 @@ where .remove(storage, validator)?; } ValidatorState::BelowThreshold => { - println!("Below-threshold"); + tracing::debug!("Below-threshold"); } ValidatorState::Inactive => { - println!("INACTIVE"); + tracing::debug!("INACTIVE"); panic!( "Shouldn't be here - haven't implemented inactive vals yet" ) @@ -3604,11 +4378,7 @@ where Ok(()) } -/// Process slashes that have been queued up after discovery. 
Calculate the -/// cubic slashing rate, store the finalized slashes, update the deltas, then -/// transfer slashed tokens from PoS to the Slash Pool. This function is called -/// at the beginning of the epoch that is `unbonding_length + 1 + -/// cubic_slashing_window_length` epochs after the infraction epoch. +/// Process slashes NEW pub fn process_slashes( storage: &mut S, current_epoch: Epoch, @@ -3641,8 +4411,11 @@ where compute_cubic_slash_rate(storage, ¶ms, infraction_epoch)?; // Collect the enqueued slashes and update their rates - let mut validators_and_slashes: HashMap> = - HashMap::new(); + let mut eager_validator_slashes: BTreeMap> = + BTreeMap::new(); // TODO: will need to update this in storage later + let mut eager_validator_slash_rates: HashMap = HashMap::new(); + + // `slashPerValidator` and `slashesMap` while also updating in storage for enqueued_slash in enqueued_slashes.iter(storage)? { let ( NestedSubKey::Data { @@ -3666,254 +4439,585 @@ where r#type: enqueued_slash.r#type, rate: slash_rate, }; - tracing::debug!( - "Slash for validator {} committed in epoch {} has rate {}", - &validator, - enqueued_slash.epoch, - slash_rate - ); - let cur_slashes = validators_and_slashes.entry(validator).or_default(); + let cur_slashes = eager_validator_slashes + .entry(validator.clone()) + .or_default(); cur_slashes.push(updated_slash); + let cur_rate = + eager_validator_slash_rates.entry(validator).or_default(); + *cur_rate = cmp::min(Dec::one(), *cur_rate + slash_rate); } - let mut deltas_for_update: HashMap> = - HashMap::new(); - - // Store the final processed slashes to their corresponding validators, then - // update the deltas - for (validator, enqueued_slashes) in validators_and_slashes.into_iter() { - let validator_stake_at_infraction = read_validator_stake( + // `resultSlashing` + let mut map_validator_slash: EagerRedelegatedBondsMap = BTreeMap::new(); + for (validator, slash_rate) in eager_validator_slash_rates { + process_validator_slash( storage, 
¶ms, &validator, - infraction_epoch, - )? - .unwrap_or_default(); - - tracing::debug!( - "Validator {} stake at infraction epoch {} = {}", - &validator, - infraction_epoch, - validator_stake_at_infraction.to_string_native() - ); - - let mut total_rate = Dec::zero(); + slash_rate, + current_epoch, + &mut map_validator_slash, + )?; + } + tracing::debug!("Slashed amounts for validators: {map_validator_slash:#?}"); - for enqueued_slash in &enqueued_slashes { - // Add this slash to the list of validator's slashes in storage - validator_slashes_handle(&validator) - .push(storage, enqueued_slash.clone())?; + // Now update the remaining parts of storage - total_rate += enqueued_slash.rate; + // Write slashes themselves into storage + for (validator, slashes) in eager_validator_slashes { + let validator_slashes = validator_slashes_handle(&validator); + for slash in slashes { + validator_slashes.push(storage, slash)?; } - total_rate = cmp::min(Dec::one(), total_rate); - - // Find the total amount deducted from the deltas due to unbonds that - // became active after the infraction epoch, accounting for slashes - let mut total_unbonded = token::Amount::default(); - - let total_bonded_handle = total_bonded_handle(&validator); - let mut sum_post_bonds = token::Change::default(); - - // Start from after the infraction epoch up thru last epoch before - // processing - tracing::debug!("Iterating over unbonds after the infraction epoch"); - for epoch in Epoch::iter_bounds_inclusive( - infraction_epoch.next(), - current_epoch.prev(), - ) { - tracing::debug!("Epoch {}", epoch); - let mut recent_unbonds = token::Change::default(); - let unbonds = unbond_records_handle(&validator).at(&epoch); - for unbond in unbonds.iter(storage)? 
{ - let (start, unbond_amount) = unbond?; - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let prev_slashes = find_slashes_in_range( - storage, - start, - Some( - infraction_epoch - .checked_sub(Epoch( - params.unbonding_len - + params.cubic_slashing_window_length, - )) - .unwrap_or_default(), - ), - &validator, - )?; - tracing::debug!( - "Slashes for this unbond: {:?}", - prev_slashes - ); + } - total_unbonded += - token::Amount::from_change(get_slashed_amount( - ¶ms, - unbond_amount, - &prev_slashes, - )?); - } else { - recent_unbonds += unbond_amount.change(); - } + // Update the validator stakes + for (validator, slash_amounts) in map_validator_slash { + let mut slash_acc = token::Amount::zero(); - tracing::debug!( - "Total unbonded (epoch {}) w slashing = {}", + // Update validator sets first because it needs to be able to read + // validator stake before we make any changes to it + for (&epoch, &slash_amount) in &slash_amounts { + let state = validator_state_handle(&validator) + .get(storage, epoch, ¶ms)? + .unwrap(); + if state != ValidatorState::Jailed { + update_validator_set( + storage, + ¶ms, + &validator, + -slash_amount.change(), epoch, - total_unbonded.to_string_native() - ); + )?; } + } + // Then update validator and total deltas + for (epoch, slash_amount) in slash_amounts { + let slash_delta = slash_amount - slash_acc; + slash_acc += slash_delta; - sum_post_bonds += total_bonded_handle - .get_delta_val(storage, epoch, ¶ms)? 
- .unwrap_or_default() - - recent_unbonds; + update_validator_deltas( + storage, + &validator, + -slash_delta.change(), + epoch, + 0, + )?; + update_total_deltas(storage, -slash_delta.change(), epoch, 0)?; } - // Compute the adjusted validator deltas and slashed amounts from the - // current up until the pipeline epoch - let mut last_slash = token::Change::default(); - for offset in 0..params.pipeline_len { - tracing::debug!( - "Epoch {}\nLast slash = {}", - current_epoch + offset, - last_slash.to_string_native() - ); - let mut recent_unbonds = token::Change::default(); - let unbonds = - unbond_records_handle(&validator).at(&(current_epoch + offset)); + // TODO: should we clear some storage here as is done in Quint?? + // Possibly make the `unbonded` LazyMaps epoched so that it is done + // automatically? + } - for unbond in unbonds.iter(storage)? { - let (start, unbond_amount) = unbond?; - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let prev_slashes = find_slashes_in_range( - storage, - start, - Some( - infraction_epoch - .checked_sub(Epoch( - params.unbonding_len - + params.cubic_slashing_window_length, - )) - .unwrap_or_default(), - ), - &validator, - )?; - tracing::debug!( - "Slashes for this unbond: {:?}", - prev_slashes - ); + Ok(()) +} - total_unbonded += - token::Amount::from_change(get_slashed_amount( - ¶ms, - unbond_amount, - &prev_slashes, - )?); - } else { - recent_unbonds += unbond_amount.change(); - } +/// Process a slash by (i) slashing the misbehaving validator; and (ii) any +/// validator to which it has redelegated some tokens and the slash misbehaving +/// epoch is wihtin the redelegation slashing window. +/// +/// `validator` - the misbehaving validator. +/// `slash_rate` - the slash rate. +/// `slashed_amounts_map` - a map from validator address to a map from epoch to +/// already processed slash amounts. 
+/// +/// Adds any newly processed slash amount of any involved validator to +/// `slashed_amounts_map`. +// Quint `processSlash` +fn process_validator_slash( + storage: &mut S, + params: &PosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amount_map: &mut EagerRedelegatedBondsMap, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // `resultSlashValidator + let result_slash = slash_validator( + storage, + params, + validator, + slash_rate, + current_epoch, + &slashed_amount_map + .get(validator) + .cloned() + .unwrap_or_default(), + )?; - tracing::debug!( - "Total unbonded (offset {}) w slashing = {}", - offset, - total_unbonded.to_string_native() - ); - } + // `updatedSlashedAmountMap` + let validator_slashes = + slashed_amount_map.entry(validator.clone()).or_default(); + for (epoch, slash) in result_slash { + *validator_slashes.entry(epoch).or_default() += slash; + } - let this_slash = total_rate - * (validator_stake_at_infraction - total_unbonded).change(); - let diff_slashed_amount = last_slash - this_slash; - last_slash = this_slash; - // println!("This slash = {}", this_slash); - // println!("Diff slashed amount = {}", diff_slashed_amount); - // total_slashed -= diff_slashed_amount; - // total_unbonded = token::Amount::default(); - - sum_post_bonds += total_bonded_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default() - - recent_unbonds; - - let validator_stake_at_offset = read_validator_stake( - storage, - ¶ms, - &validator, - current_epoch + offset, - )? 
- .unwrap_or_default() - .change(); - let slashable_stake_at_offset = - validator_stake_at_offset - sum_post_bonds; - assert!(slashable_stake_at_offset >= token::Change::default()); - - let change = - cmp::max(-slashable_stake_at_offset, diff_slashed_amount); - - let val_updates = - deltas_for_update.entry(validator.clone()).or_default(); - val_updates.push((offset, change)); - } + // `outgoingRedelegation` + let outgoing_redelegations = + validator_outgoing_redelegations_handle(validator); + + // Final loop in `processSlash` + let dest_validators = outgoing_redelegations + .iter(storage)? + .map(|res| { + let ( + NestedSubKey::Data { + key: dest_validator, + nested_sub_key: _, + }, + _redelegation, + ) = res?; + Ok(dest_validator) + }) + .collect::>>()?; + + for dest_validator in dest_validators { + let to_modify = slashed_amount_map + .entry(dest_validator.clone()) + .or_default(); + + tracing::debug!( + "Slashing {} redelegation to {}", + validator, + &dest_validator + ); + + // `slashValidatorRedelegation` + slash_validator_redelegation( + storage, + params, + validator, + current_epoch, + &outgoing_redelegations.at(&dest_validator), + &validator_slashes_handle(validator), + &validator_total_redelegated_unbonded_handle(&dest_validator), + slash_rate, + to_modify, + )?; } - // println!("\nUpdating deltas"); - // Update the deltas in storage - // let mut total_slashed = token::Change::default(); - for (validator, updates) in deltas_for_update { - for (offset, delta) in updates { - // println!("Val {}, offset {}, delta {}", &validator, offset, - // delta); - tracing::debug!( - "Deltas change = {} at offset {} for validator {}", - delta.to_string_native(), - offset, - &validator - ); - // total_slashed -= change; + Ok(()) +} - update_validator_deltas( - storage, - ¶ms, - &validator, - delta, - current_epoch, - offset, - )?; - update_total_deltas( +/// In the context of a redelegation, the function computes how much a validator +/// (the destination validator of the 
redelegation) should be slashed due to the +/// misbehaving of a second validator (the source validator of the +/// redelegation). The function computes how much the validator whould be +/// slashed at all epochs between the current epoch (curEpoch) + 1 and the +/// current epoch + 1 + PIPELINE_OFFSET, accounting for any tokens of the +/// redelegation already unbonded. +/// +/// - `src_validator` - the source validator +/// - `outgoing_redelegations` - a map from pair of epochs to int that includes +/// all the redelegations from the source validator to the destination +/// validator. +/// - The outer key is epoch at which the bond started at the source +/// validator. +/// - The inner key is epoch at which the redelegation started (the epoch at +/// which was issued). +/// - `slashes` a list of slashes of the source validator. +/// - `dest_total_redelegated_unbonded` - a map of unbonded redelegated tokens +/// at the destination validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `dest_slashed_amounts` - a map from epoch to already processed slash +/// amounts. +/// +/// Adds any newly processed slash amount to `dest_slashed_amounts`. +#[allow(clippy::too_many_arguments)] +fn slash_validator_redelegation( + storage: &S, + params: &PosParams, + src_validator: &Address, + current_epoch: Epoch, + outgoing_redelegations: &NestedMap>, + slashes: &Slashes, + dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + dest_slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + for res in outgoing_redelegations.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: bond_start, + nested_sub_key: SubKey::Data(redel_start), + }, + amount, + ) = res?; + + if params.in_redelegation_slashing_window( + infraction_epoch, + redel_start, + params.redelegation_end_epoch_from_start(redel_start), + ) && bond_start <= infraction_epoch + { + slash_redelegation( storage, - ¶ms, - delta, + params, + amount, + bond_start, + params.redelegation_end_epoch_from_start(redel_start), + src_validator, current_epoch, - offset, + slashes, + dest_total_redelegated_unbonded, + slash_rate, + dest_slashed_amounts, )?; } } - // debug_assert!(total_slashed >= token::Change::default()); + Ok(()) +} - // TODO: Transfer all slashed tokens from PoS account to Slash Pool address - // let staking_token = staking_token_address(storage); - // transfer_tokens( - // storage, - // &staking_token, - // token::Amount::from_change(total_slashed), - // &ADDRESS, - // &SLASH_POOL_ADDRESS, - // )?; +#[allow(clippy::too_many_arguments)] +fn slash_redelegation( + storage: &S, + params: &PosParams, + amount: token::Amount, + bond_start: Epoch, + redel_bond_start: Epoch, + src_validator: &Address, + current_epoch: Epoch, + slashes: &Slashes, + total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + tracing::debug!( + "\nSlashing redelegation amount {} - bond start {} and \ + redel_bond_start {} - at rate {}\n", + amount.to_string_native(), + bond_start, + redel_bond_start, + slash_rate + ); + + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + // Slash redelegation destination validator from the next epoch only + // as they won't be jailed + let set_update_epoch = current_epoch.next(); + + let mut init_tot_unbonded = + Epoch::iter_bounds_inclusive(infraction_epoch.next(), set_update_epoch) + .map(|epoch| { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + 
.at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + Ok(redelegated_unbonded) + }) + .sum::>()?; + + for epoch in Epoch::iter_range(set_update_epoch, params.pipeline_len) { + let updated_total_unbonded = { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + .at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + init_tot_unbonded + redelegated_unbonded + }; + + let list_slashes = slashes + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() + // TODO this may need to be `<=` as in `fn compute_total_unbonded` + // + // NOTE(Tomas): Agreed and changed to `<=`. We're looking + // for slashes that were processed before or in the epoch + // in which slashes that are currently being processed + // occurred. Because we're slashing in the beginning of an + // epoch, we're also taking slashes that were processed in + // the infraction epoch as they would still be processed + // before any infraction occurred. + <= infraction_epoch + }) + .collect::>(); + + let slashable_amount = amount + .checked_sub(updated_total_unbonded) + .unwrap_or_default(); + + let slashed = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + let list_slashes = slashes + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + }) + .collect::>(); + + let slashable_stake = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + init_tot_unbonded = updated_total_unbonded; + let to_slash = cmp::min(slashed, slashable_stake); + if !to_slash.is_zero() { + let map_value = slashed_amounts.entry(epoch).or_default(); + *map_value += to_slash; + } + } Ok(()) } +/// Computes for a given validator and a slash how much should be slashed at all +/// epochs between the currentÃ¥ epoch (curEpoch) + 1 and the current epoch + 1 + +/// PIPELINE_OFFSET, accounting for any tokens already unbonded. +/// +/// - `validator` - the misbehaving validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `slashed_amounts_map` - a map from epoch to already processed slash +/// amounts. +/// +/// Returns a map that adds any newly processed slash amount to +/// `slashed_amounts_map`. +// `def slashValidator` +fn slash_validator( + storage: &S, + params: &PosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amounts_map: &BTreeMap, +) -> storage_api::Result> +where + S: StorageRead, +{ + tracing::debug!("Slashing validator {} at rate {}", validator, slash_rate); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let total_unbonded = total_unbonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + let total_bonded = total_bonded_handle(validator); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + + let mut slashed_amounts = slashed_amounts_map.clone(); + + let mut tot_bonds = total_bonded + .get_data_handler() + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|&(epoch, bonded)| { + epoch <= infraction_epoch && bonded > 0.into() + }) + .collect::>(); + + let mut redelegated_bonds = tot_bonds + .keys() + .filter(|&epoch| { + !total_redelegated_bonded + .at(epoch) + .is_empty(storage) + .unwrap() + }) + .map(|epoch| { + let tot_redel_bonded = total_redelegated_bonded + .at(epoch) + .collect_map(storage) + .unwrap(); + (*epoch, tot_redel_bonded) + }) + .collect::>(); + + let mut sum = token::Amount::zero(); + + let eps = current_epoch + .iter_range(params.pipeline_len) + .collect::>(); + for epoch in eps.into_iter().rev() { + let amount = tot_bonds.iter().fold( + token::Amount::zero(), + |acc, (bond_start, bond_amount)| { + acc + compute_slash_bond_at_epoch( + storage, + params, + validator, + epoch, + infraction_epoch, + *bond_start, + *bond_amount, + redelegated_bonds.get(bond_start), + slash_rate, + ) + .unwrap() + }, + ); + + let new_bonds = total_unbonded.at(&epoch); + tot_bonds = new_bonds + .collect_map(storage) + .unwrap() + .into_iter() + .filter(|(ep, _)| *ep <= infraction_epoch) + .collect::>(); + + let new_redelegated_bonds = tot_bonds + .keys() + .filter(|&ep| { + !total_redelegated_unbonded.at(ep).is_empty(storage).unwrap() + }) + .map(|ep| { + ( + *ep, + total_redelegated_unbonded + .at(&epoch) + .at(ep) + .collect_map(storage) + .unwrap(), + ) + }) + .collect::>(); + + redelegated_bonds = new_redelegated_bonds; + + // `newSum` + sum += amount; + + // `newSlashesMap` + let cur = slashed_amounts.entry(epoch).or_default(); + *cur += sum; + } + // Hack - should this be done differently? (think this is safe) + let pipeline_epoch = current_epoch + params.pipeline_len; + let last_amt = slashed_amounts + .get(&pipeline_epoch.prev()) + .cloned() + .unwrap(); + slashed_amounts.insert(pipeline_epoch, last_amt); + + Ok(slashed_amounts) +} + +/// Get the remaining token amount in a bond after applying a set of slashes. 
+/// +/// - `validator` - the bond's validator +/// - `epoch` - the latest slash epoch to consider. +/// - `start` - the start epoch of the bond +/// - `redelegated_bonds` +fn compute_bond_at_epoch( + storage: &S, + params: &PosParams, + validator: &Address, + epoch: Epoch, + start: Epoch, + amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, +) -> storage_api::Result +where + S: StorageRead, +{ + let list_slashes = validator_slashes_handle(validator) + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + // TODO: check bounds on second arg + start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() <= epoch + }) + .collect::>(); + + let slash_epoch_filter = + |e: Epoch| e + params.slash_processing_epoch_offset() <= epoch; + + let result_fold = redelegated_bonds + .map(|redelegated_bonds| { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_bonds, + start, + &list_slashes, + slash_epoch_filter, + ) + }) + .unwrap_or_default(); + + let total_not_redelegated = amount - result_fold.total_redelegated; + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + Ok(after_not_redelegated + result_fold.total_after_slashing) +} + +/// Uses `fn compute_bond_at_epoch` to compute the token amount to slash in +/// order to prevent overslashing. +#[allow(clippy::too_many_arguments)] +fn compute_slash_bond_at_epoch( + storage: &S, + params: &PosParams, + validator: &Address, + epoch: Epoch, + infraction_epoch: Epoch, + bond_start: Epoch, + bond_amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, + slash_rate: Dec, +) -> storage_api::Result +where + S: StorageRead, +{ + let amount_due = compute_bond_at_epoch( + storage, + params, + validator, + infraction_epoch, + bond_start, + bond_amount, + redelegated_bonds, + )? 
+ .mul_ceil(slash_rate); + let slashable_amount = compute_bond_at_epoch( + storage, + params, + validator, + epoch, + bond_start, + bond_amount, + redelegated_bonds, + )?; + Ok(cmp::min(amount_due, slashable_amount)) +} + /// Unjail a validator that is currently jailed pub fn unjail_validator( storage: &mut S, @@ -3963,8 +5067,7 @@ where // Re-insert the validator into the validator set and update its state let pipeline_epoch = current_epoch + params.pipeline_len; let stake = - read_validator_stake(storage, ¶ms, validator, pipeline_epoch)? - .unwrap_or_default(); + read_validator_stake(storage, ¶ms, validator, pipeline_epoch)?; insert_validator_into_validator_set( storage, @@ -4017,6 +5120,7 @@ where /// Find slashes applicable to a validator with inclusive `start` and exclusive /// `end` epoch. +#[allow(dead_code)] fn find_slashes_in_range( storage: &S, start: Epoch, @@ -4032,13 +5136,215 @@ where if start <= slash.epoch && end.map(|end| slash.epoch < end).unwrap_or(true) { - // println!( - // "Slash (epoch, rate) = ({}, {})", - // &slash.epoch, &slash.rate - // ); let cur_rate = slashes.entry(slash.epoch).or_default(); *cur_rate = cmp::min(*cur_rate + slash.rate, Dec::one()); } } Ok(slashes) } + +/// Redelegate bonded tokens from a source validator to a destination validator +pub fn redelegate_tokens( + storage: &mut S, + delegator: &Address, + src_validator: &Address, + dest_validator: &Address, + current_epoch: Epoch, + amount: token::Amount, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + tracing::debug!( + "Delegator {} redelegating {} tokens from {} to {}", + delegator, + amount.to_string_native(), + src_validator, + dest_validator + ); + if amount.is_zero() { + return Ok(()); + } + + // The src and dest validators must be different + if src_validator == dest_validator { + return Err(RedelegationError::RedelegationSrcEqDest.into()); + } + + // The delegator must not be a validator + if is_validator(storage, delegator)? 
{ + return Err(RedelegationError::DelegatorIsValidator.into()); + } + + // The src and dest validators must actually be validators + if !is_validator(storage, src_validator)? { + return Err( + RedelegationError::NotAValidator(src_validator.clone()).into() + ); + } + if !is_validator(storage, dest_validator)? { + return Err( + RedelegationError::NotAValidator(dest_validator.clone()).into() + ); + } + + let params = read_pos_params(storage)?; + let pipeline_epoch = current_epoch + params.pipeline_len; + let src_redel_end_epoch = + validator_incoming_redelegations_handle(src_validator) + .get(storage, delegator)?; + + // Forbid chained redelegations. A redelegation is "chained" if: + // 1. the source validator holds bonded tokens that themselves were + // redelegated to the src validator + // 2. given the latest epoch at which the most recently redelegated tokens + // started contributing to the src validator's voting power, these tokens + // cannot be slashed anymore + let is_not_chained = if let Some(end_epoch) = src_redel_end_epoch { + // TODO: check bounds for correctness (> and presence of cubic offset) + let last_contrib_epoch = end_epoch.prev(); + // If the source validator's slashes that would cause slash on + // redelegation are now outdated (would have to be processed before or + // on start of the current epoch), the redelegation can be redelegated + // again + last_contrib_epoch + params.slash_processing_epoch_offset() + <= current_epoch + } else { + true + }; + if !is_not_chained { + return Err(RedelegationError::IsChainedRedelegation.into()); + } + + // Unbond the redelegated tokens from the src validator. + // `resultUnbond` in quint + let result_unbond = unbond_tokens( + storage, + Some(delegator), + src_validator, + amount, + current_epoch, + true, + )?; + + // The unbonded amount after slashing is what is going to be redelegated. 
+ // `amountAfterSlashing` + let amount_after_slashing = result_unbond.sum; + tracing::debug!( + "Redelegated amount after slashing: {}", + amount_after_slashing.to_string_native() + ); + + // Add incoming redelegated bonds to the dest validator. + // `updatedRedelegatedBonds` with updates to delegatorState + // `redelegatedBonded` + let redelegated_bonds = delegator_redelegated_bonds_handle(delegator) + .at(dest_validator) + .at(&pipeline_epoch) + .at(src_validator); + for (&epoch, &unbonded_amount) in result_unbond.epoch_map.iter() { + redelegated_bonds.update(storage, epoch, |current| { + current.unwrap_or_default() + unbonded_amount + })?; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, delegator, dest_validator)?; + tracing::debug!("\nRedeleg dest bonds before incrementing: {bonds:#?}"); + } + + // Add a bond delta to the destination. + if !amount_after_slashing.is_zero() { + // `updatedDelegator` with updates to `bonded` + let bond_handle = bond_handle(delegator, dest_validator); + bond_handle.add( + storage, + amount_after_slashing, + current_epoch, + params.pipeline_len, + )?; + // `updatedDestValidator` --> `with("totalVBonded")` + // Add the amount to the dest validator total bonded + let dest_total_bonded = total_bonded_handle(dest_validator); + dest_total_bonded.add( + storage, + amount_after_slashing, + current_epoch, + params.pipeline_len, + )?; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, delegator, dest_validator)?; + tracing::debug!("\nRedeleg dest bonds after incrementing: {bonds:#?}"); + } + + // Add outgoing redelegation to the src validator. 
+ // `updateOutgoingRedelegations` with `updatedSrcValidator` + let outgoing_redelegations = + validator_outgoing_redelegations_handle(src_validator) + .at(dest_validator); + for (start, &unbonded_amount) in result_unbond.epoch_map.iter() { + outgoing_redelegations.at(start).update( + storage, + current_epoch, + |current| current.unwrap_or_default() + unbonded_amount, + )?; + } + + // Add the amount to the dest validator total redelegated bonds. + let dest_total_redelegated_bonded = + validator_total_redelegated_bonded_handle(dest_validator) + .at(&pipeline_epoch) + .at(src_validator); + for (&epoch, &amount) in &result_unbond.epoch_map { + dest_total_redelegated_bonded.update(storage, epoch, |current| { + current.unwrap_or_default() + amount + })?; + } + + // Set the epoch of the validator incoming redelegation from this delegator + let dest_incoming_redelegations = + validator_incoming_redelegations_handle(dest_validator); + dest_incoming_redelegations.insert( + storage, + delegator.clone(), + pipeline_epoch, + )?; + + // Update validator set for dest validator + let is_jailed_at_pipeline = matches!( + validator_state_handle(dest_validator).get( + storage, + pipeline_epoch, + ¶ms + )?, + Some(ValidatorState::Jailed) + ); + if !is_jailed_at_pipeline { + update_validator_set( + storage, + ¶ms, + dest_validator, + amount_after_slashing.change(), + pipeline_epoch, + )?; + } + + // Update deltas + update_validator_deltas( + storage, + dest_validator, + amount_after_slashing.change(), + current_epoch, + params.pipeline_len, + )?; + update_total_deltas( + storage, + amount_after_slashing.change(), + current_epoch, + params.pipeline_len, + )?; + + Ok(()) +} diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 8501aff379..1fe0b33ed3 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -173,6 +173,37 @@ impl PosParams { let end = infraction_epoch + self.cubic_slashing_window_length; (start, end) } + + /// 
Get the redelegation end epoch from the start epoch + pub fn redelegation_end_epoch_from_start(&self, end: Epoch) -> Epoch { + end + self.pipeline_len + } + + /// Get the redelegation start epoch from the end epoch + pub fn redelegation_start_epoch_from_end(&self, end: Epoch) -> Epoch { + end - self.pipeline_len + } + + /// Determine if the infraction is in the lazy slashing window for a + /// redelegation source validator. Any source validator slashes that + /// were processed before redelegation was applied will be applied + /// eagerly on the redelegation amount, so this function will only return + /// `true` for applicable infractions that were processed after + /// the redelegation was applied. + /// + /// The `redel_start` is the epoch in which the redelegation was applied and + /// `redel_end` the epoch in which it no longer contributed to source + /// validator's stake. + pub fn in_redelegation_slashing_window( + &self, + infraction_epoch: Epoch, + redel_start: Epoch, + redel_end: Epoch, + ) -> bool { + let processing_epoch = + infraction_epoch + self.slash_processing_epoch_offset(); + redel_start < processing_epoch && infraction_epoch < redel_end + } } #[cfg(test)] diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index 54bd7cfe6b..fe7e6c8d7e 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -6,7 +6,7 @@ use namada_core::types::storage::{DbKeySeg, Epoch, Key, KeySeg}; use super::ADDRESS; use crate::epoched::LAZY_MAP_SUB_KEY; -pub use crate::types::*; // TODO: not sure why this needs to be public +use crate::types::BondId; const PARAMS_STORAGE_KEY: &str = "params"; const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; @@ -43,6 +43,13 @@ const CONSENSUS_KEYS: &str = "consensus_keys"; const LAST_BLOCK_PROPOSER_STORAGE_KEY: &str = "last_block_proposer"; const CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY: &str = "validator_rewards_accumulator"; +const VALIDATOR_INCOMING_REDELEGATIONS_KEY: &str = 
"incoming_redelegations"; +const VALIDATOR_OUTGOING_REDELEGATIONS_KEY: &str = "outgoing_redelegations"; +const VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY: &str = "total_redelegated_bonded"; +const VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY: &str = + "total_redelegated_unbonded"; +const DELEGATOR_REDELEGATED_BONDS_KEY: &str = "delegator_redelegated_bonds"; +const DELEGATOR_REDELEGATED_UNBONDS_KEY: &str = "delegator_redelegated_unbonds"; /// Is the given key a PoS storage key? pub fn is_pos_key(key: &Key) -> bool { @@ -257,6 +264,66 @@ pub fn validator_delegation_rewards_product_key(validator: &Address) -> Key { .expect("Cannot obtain a storage key") } +/// Storage key for a validator's incoming redelegations, where the prefixed +/// validator is the destination validator. +pub fn validator_incoming_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_INCOMING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's outgoing redelegations, where the prefixed +/// validator is the source validator. +pub fn validator_outgoing_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_OUTGOING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-bonded amount to track for +/// slashing +pub fn validator_total_redelegated_bonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-unbonded amount to track for +/// slashing +pub fn validator_total_redelegated_unbonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all delegators' redelegated bonds. 
+pub fn delegator_redelegated_bonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_BONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated bond information. +pub fn delegator_redelegated_bonds_key(delegator: &Address) -> Key { + delegator_redelegated_bonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all delegators' redelegated unbonds. +pub fn delegator_redelegated_unbonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_UNBONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated unbond information. +pub fn delegator_redelegated_unbonds_key(delegator: &Address) -> Key { + delegator_redelegated_unbonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + /// Is storage key for validator's delegation rewards products? pub fn is_validator_delegation_rewards_product_key( key: &Key, @@ -521,9 +588,9 @@ pub fn is_unbond_key(key: &Key) -> Option<(BondId, Epoch, Epoch)> { DbKeySeg::AddressSeg(source), DbKeySeg::AddressSeg(validator), DbKeySeg::StringSeg(data_1), - DbKeySeg::StringSeg(withdraw_epoch_str), - DbKeySeg::StringSeg(data_2), DbKeySeg::StringSeg(start_epoch_str), + DbKeySeg::StringSeg(data_2), + DbKeySeg::StringSeg(withdraw_epoch_str), ] if addr == &ADDRESS && prefix == UNBOND_STORAGE_KEY && data_1 == lazy_map::DATA_SUBKEY diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index b7463c8ea5..8c2ce7fbc4 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -1,16 +1,25 @@ //! 
PoS system tests mod state_machine; +mod state_machine_v2; +mod utils; -use std::cmp::min; -use std::ops::Range; +use std::cmp::{max, min}; +use std::collections::{BTreeMap, BTreeSet}; +use std::ops::{Deref, Range}; +use std::str::FromStr; +use assert_matches::assert_matches; use namada_core::ledger::storage::testing::TestWlStorage; -use namada_core::ledger::storage_api::collections::lazy_map; +use namada_core::ledger::storage_api::collections::lazy_map::{ + self, Collectable, NestedMap, +}; +use namada_core::ledger::storage_api::collections::LazyCollection; use namada_core::ledger::storage_api::token::{credit_tokens, read_balance}; use namada_core::ledger::storage_api::StorageRead; use namada_core::types::address::testing::{ - address_from_simple_seed, arb_established_address, + address_from_simple_seed, arb_established_address, established_address_1, + established_address_2, established_address_3, }; use namada_core::types::address::{Address, EstablishedAddressGen}; use namada_core::types::dec::Dec; @@ -19,9 +28,9 @@ use namada_core::types::key::testing::{ arb_common_keypair, common_sk_from_simple_seed, }; use namada_core::types::key::RefTo; -use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::storage::{BlockHeight, Epoch, Key}; +use namada_core::types::token::testing::arb_amount_non_zero_ceiled; use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_core::types::uint::Uint; use namada_core::types::{address, key, token}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -33,34 +42,45 @@ use crate::parameters::testing::arb_pos_params; use crate::parameters::PosParams; use crate::types::{ into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, - ConsensusValidator, GenesisValidator, Position, ReverseOrdTokenAmount, - SlashType, UnbondDetails, ValidatorSetUpdate, ValidatorState, - WeightedValidator, + ConsensusValidator, EagerRedelegatedBondsMap, GenesisValidator, Position, + RedelegatedTokens, 
ReverseOrdTokenAmount, Slash, SlashType, UnbondDetails, + ValidatorSetUpdate, ValidatorState, WeightedValidator, }; use crate::{ - become_validator, below_capacity_validator_set_handle, bond_handle, - bond_tokens, bonds_and_unbonds, consensus_validator_set_handle, - copy_validator_sets_and_positions, find_validator_by_raw_hash, - get_num_consensus_validators, init_genesis, - insert_validator_into_validator_set, is_validator, process_slashes, - purge_validator_sets_for_old_epoch, + apply_list_slashes, become_validator, below_capacity_validator_set_handle, + bond_handle, bond_tokens, bonds_and_unbonds, + compute_amount_after_slashing_unbond, + compute_amount_after_slashing_withdraw, compute_bond_at_epoch, + compute_modified_redelegation, compute_new_redelegated_unbonds, + compute_slash_bond_at_epoch, compute_slashable_amount, + consensus_validator_set_handle, copy_validator_sets_and_positions, + delegator_redelegated_bonds_handle, delegator_redelegated_unbonds_handle, + find_bonds_to_remove, find_validator_by_raw_hash, + fold_and_slash_redelegated_bonds, get_num_consensus_validators, + init_genesis, insert_validator_into_validator_set, is_validator, + process_slashes, purge_validator_sets_for_old_epoch, read_below_capacity_validator_set_addresses_with_stake, read_below_threshold_validator_set_addresses, read_consensus_validator_set_addresses_with_stake, read_total_stake, - read_validator_delta_value, read_validator_stake, slash, - staking_token_address, store_total_consensus_stake, total_deltas_handle, - unbond_handle, unbond_tokens, unjail_validator, update_validator_deltas, - update_validator_set, validator_consensus_key_handle, - validator_set_positions_handle, validator_set_update_tendermint, - validator_slashes_handle, validator_state_handle, withdraw_tokens, - write_validator_address_raw_hash, BecomeValidator, + read_validator_deltas_value, read_validator_stake, slash, + slash_redelegation, slash_validator, slash_validator_redelegation, + staking_token_address, 
store_total_consensus_stake, total_bonded_handle, + total_deltas_handle, total_unbonded_handle, unbond_handle, unbond_tokens, + unjail_validator, update_validator_deltas, update_validator_set, + validator_consensus_key_handle, validator_incoming_redelegations_handle, + validator_outgoing_redelegations_handle, validator_set_positions_handle, + validator_set_update_tendermint, validator_slashes_handle, + validator_state_handle, validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, withdraw_tokens, + write_validator_address_raw_hash, BecomeValidator, EagerRedelegatedUnbonds, + FoldRedelegatedBondsResult, ModifiedRedelegation, RedelegationError, STORE_VALIDATOR_SETS_LEN, }; proptest! { // Generate arb valid input for `test_init_genesis_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. Config::default() })] #[test] @@ -77,7 +97,7 @@ proptest! { proptest! { // Generate arb valid input for `test_bonds_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. Config::default() })] #[test] @@ -93,7 +113,7 @@ proptest! { proptest! { // Generate arb valid input for `test_become_validator_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. Config::default() })] #[test] @@ -112,7 +132,7 @@ proptest! { proptest! { // Generate arb valid input for `test_slashes_with_unbonding_aux` #![proptest_config(Config { - cases: 5, + cases: 100, .. Config::default() })] #[test] @@ -128,7 +148,7 @@ proptest! { proptest! { // Generate arb valid input for `test_unjail_validator_aux` #![proptest_config(Config { - cases: 5, + cases: 100, .. Config::default() })] #[test] @@ -141,6 +161,72 @@ proptest! { } } +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. 
Config::default() + })] + #[test] + fn test_simple_redelegation( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_simple_redelegation_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_redelegation_with_slashing( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_redelegation_with_slashing_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +proptest! { + // Generate arb valid input for `test_chain_redelegations_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_chain_redelegations( + + genesis_validators in arb_genesis_validators(3..4, None), + + ) { + test_chain_redelegations_aux(genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_overslashing_aux` + #![proptest_config(Config { + cases: 1, + .. 
Config::default() + })] + #[test] + fn test_overslashing( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_overslashing_aux(genesis_validators) + } +} + fn arb_params_and_genesis_validators( num_max_validator_slots: Option, val_size: Range, @@ -303,10 +389,8 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Check the bond delta let self_bond = bond_handle(&validator.address, &validator.address); - let delta = self_bond - .get_delta_val(&s, pipeline_epoch, ¶ms) - .unwrap(); - assert_eq!(delta, Some(amount_self_bond.change())); + let delta = self_bond.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount_self_bond)); // Check the validator in the validator set let set = @@ -322,13 +406,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { } )); - let val_deltas = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_deltas = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); assert_eq!(val_deltas, Some(amount_self_bond.change())); let total_deltas_handle = total_deltas_handle(); @@ -423,12 +503,10 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, pipeline_epoch.prev(), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let val_stake_post = read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) - .unwrap() - .unwrap_or_default(); + .unwrap(); assert_eq!(validator.tokens + amount_self_bond, val_stake_pre); assert_eq!( validator.tokens + amount_self_bond + amount_del, @@ -440,14 +518,14 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { .get_sum(&s, pipeline_epoch.prev(), ¶ms) .unwrap() .unwrap_or_default(), - token::Change::default() + token::Amount::zero() ); assert_eq!( delegation .get_sum(&s, pipeline_epoch, ¶ms) .unwrap() .unwrap_or_default(), - amount_del.change() + amount_del ); // Check delegation bonds details after delegation @@ -532,7 +610,7 @@ fn 
test_bonds_aux(params: PosParams, validators: Vec) { amount_self_bond + (validator.tokens / 2); // When the difference is 0, only the non-genesis self-bond is unbonded let unbonded_genesis_self_bond = - amount_self_unbond - amount_self_bond != token::Amount::default(); + amount_self_unbond - amount_self_bond != token::Amount::zero(); dbg!( amount_self_unbond, amount_self_bond, @@ -546,6 +624,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, amount_self_unbond, current_epoch, + false, ) .unwrap(); @@ -561,22 +640,21 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) .unwrap(); - let val_delta = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); let unbond = unbond_handle(&validator.address, &validator.address); assert_eq!(val_delta, Some(-amount_self_unbond.change())); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &Epoch::default()) + .at(&Epoch::default()) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), if unbonded_genesis_self_bond { Some(amount_self_unbond - amount_self_bond) @@ -586,23 +664,23 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { ); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &(self_bond_epoch + params.pipeline_len)) + .at(&(self_bond_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), Some(amount_self_bond) ); assert_eq!( val_stake_pre, - Some(validator.tokens + amount_self_bond + amount_del) + validator.tokens + amount_self_bond + amount_del ); assert_eq!( val_stake_post, - Some( - 
validator.tokens + amount_self_bond + amount_del - - amount_self_unbond - ) + validator.tokens + amount_self_bond + amount_del - amount_self_unbond ); // Check all bond and unbond details (self-bonds and delegation) @@ -680,6 +758,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, amount_undel, current_epoch, + false, ) .unwrap(); @@ -693,13 +772,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { let val_stake_post = read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) .unwrap(); - let val_delta = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); let unbond = unbond_handle(&delegator, &validator.address); assert_eq!( @@ -708,24 +783,24 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { ); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &(delegation_epoch + params.pipeline_len)) + .at(&(delegation_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), Some(amount_undel) ); assert_eq!( val_stake_pre, - Some(validator.tokens + amount_self_bond + amount_del) + validator.tokens + amount_self_bond + amount_del ); assert_eq!( val_stake_post, - Some( - validator.tokens + amount_self_bond - amount_self_unbond - + amount_del - - amount_undel - ) + validator.tokens + amount_self_bond - amount_self_unbond + amount_del + - amount_undel ); let withdrawable_offset = params.unbonding_len @@ -888,10 +963,8 @@ fn test_become_validator_aux( // Check the bond delta let bond_handle = bond_handle(&new_validator, &new_validator); let pipeline_epoch = current_epoch + params.pipeline_len; - let delta = bond_handle - .get_delta_val(&s, pipeline_epoch, ¶ms) - .unwrap(); - assert_eq!(delta, Some(amount.change())); + let delta = 
bond_handle.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount)); // Check the validator in the validator set - // If the consensus validator slots are full and all the genesis validators @@ -935,7 +1008,8 @@ fn test_become_validator_aux( current_epoch = advance_epoch(&mut s, ¶ms); // Unbond the self-bond - unbond_tokens(&mut s, None, &new_validator, amount, current_epoch).unwrap(); + unbond_tokens(&mut s, None, &new_validator, amount, current_epoch, false) + .unwrap(); let withdrawable_offset = params.unbonding_len + params.pipeline_len; @@ -1022,7 +1096,8 @@ fn test_slashes_with_unbonding_aux( let unbond_amount = Dec::new(5, 1).unwrap() * val_tokens; println!("Going to unbond {}", unbond_amount.to_string_native()); let unbond_epoch = current_epoch; - unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch).unwrap(); + unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch, false) + .unwrap(); // Discover second slash let slash_1_evidence_epoch = current_epoch; @@ -1163,7 +1238,6 @@ fn test_validator_sets() { update_validator_deltas( s, - ¶ms, addr, stake.change(), epoch, @@ -1459,13 +1533,18 @@ fn test_validator_sets() { // Because `update_validator_set` and `update_validator_deltas` are // effective from pipeline offset, we use pipeline epoch for the rest of the // checks - update_validator_set(&mut s, ¶ms, &val1, -unbond.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val1, -unbond.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val1, + -unbond.change(), epoch, params.pipeline_len, ) @@ -1655,10 +1734,10 @@ fn test_validator_sets() { let bond = token::Amount::from_uint(500_000, 0).unwrap(); let stake6 = stake6 + bond; println!("val6 {val6} new stake {}", stake6.to_string_native()); - update_validator_set(&mut s, ¶ms, &val6, bond.change(), epoch).unwrap(); + update_validator_set(&mut s, ¶ms, &val6, bond.change(), pipeline_epoch) + 
.unwrap(); update_validator_deltas( &mut s, - ¶ms, &val6, bond.change(), epoch, @@ -1808,7 +1887,7 @@ fn test_validator_sets_swap() { max_validator_slots: 2, // Set the stake threshold to 0 so no validators are in the // below-threshold set - validator_stake_threshold: token::Amount::default(), + validator_stake_threshold: token::Amount::zero(), // Set 0.1 votes per token tm_votes_per_token: Dec::new(1, 1).expect("Dec creation failed"), ..Default::default() @@ -1844,7 +1923,6 @@ fn test_validator_sets_swap() { update_validator_deltas( s, - ¶ms, addr, stake.change(), epoch, @@ -1936,25 +2014,35 @@ fn test_validator_sets_swap() { assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake2), 0); assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake3), 0); - update_validator_set(&mut s, ¶ms, &val2, bond2.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val2, bond2.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val2, + bond2.change(), epoch, params.pipeline_len, ) .unwrap(); - update_validator_set(&mut s, ¶ms, &val3, bond3.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val3, bond3.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val3, + bond3.change(), epoch, params.pipeline_len, ) @@ -1975,25 +2063,35 @@ fn test_validator_sets_swap() { into_tm_voting_power(params.tm_votes_per_token, stake3) ); - update_validator_set(&mut s, ¶ms, &val2, bonds.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val2, bonds.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val2, + bonds.change(), epoch, params.pipeline_len, ) .unwrap(); - update_validator_set(&mut s, ¶ms, &val3, bonds.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val3, bonds.change(), + pipeline_epoch, + ) + 
.unwrap(); + update_validator_deltas( + &mut s, + &val3, + bonds.change(), epoch, params.pipeline_len, ) @@ -2063,16 +2161,15 @@ fn arb_genesis_validators( size: Range, threshold: Option, ) -> impl Strategy> { + let threshold = threshold + .unwrap_or_else(|| PosParams::default().validator_stake_threshold); let tokens: Vec<_> = (0..size.end) .map(|ix| { if ix == 0 { - // If there's a threshold, make sure that at least one validator - // has at least a stake greater or equal to the threshold to - // avoid having an empty consensus set. - threshold - .map(|token| token.raw_amount()) - .unwrap_or(Uint::one()) - .as_u64()..=10_000_000_u64 + // Make sure that at least one validator has at least a stake + // greater or equal to the threshold to avoid having an empty + // consensus set. + threshold.raw_amount().as_u64()..=10_000_000_u64 } else { 1..=10_000_000_u64 } @@ -2121,11 +2218,7 @@ fn arb_genesis_validators( "Must have at least one genesis validator with stake above the \ provided threshold, if any.", move |gen_vals: &Vec| { - if let Some(thresh) = threshold { - gen_vals.iter().any(|val| val.tokens >= thresh) - } else { - true - } + gen_vals.iter().any(|val| val.tokens >= threshold) }, ) } @@ -2253,3 +2346,3384 @@ fn test_unjail_validator_aux( let second_att = unjail_validator(&mut s, val_addr, current_epoch); assert!(second_att.is_err()); } + +/// `iterateBondsUpToAmountTest` +#[test] +fn test_find_bonds_to_remove() { + let mut storage = TestWlStorage::default(); + let source = established_address_1(); + let validator = established_address_2(); + let bond_handle = bond_handle(&source, &validator); + + let (e1, e2, e6) = (Epoch(1), Epoch(2), Epoch(6)); + + bond_handle + .set(&mut storage, token::Amount::from(5), e1, 0) + .unwrap(); + bond_handle + .set(&mut storage, token::Amount::from(3), e2, 0) + .unwrap(); + bond_handle + .set(&mut storage, token::Amount::from(8), e6, 0) + .unwrap(); + + // Test 1 + let bonds_for_removal = find_bonds_to_remove( + &storage, + 
&bond_handle.get_data_handler(), + token::Amount::from(8), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6].into_iter().collect::>() + ); + assert!(bonds_for_removal.new_entry.is_none()); + + // Test 2 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(10), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6].into_iter().collect::>() + ); + assert_eq!( + bonds_for_removal.new_entry, + Some((Epoch(2), token::Amount::from(1))) + ); + + // Test 3 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(11), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6, e2].into_iter().collect::>() + ); + assert!(bonds_for_removal.new_entry.is_none()); + + // Test 4 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(12), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6, e2].into_iter().collect::>() + ); + assert_eq!( + bonds_for_removal.new_entry, + Some((Epoch(1), token::Amount::from(4))) + ); +} + +/// `computeModifiedRedelegationTest` +#[test] +fn test_compute_modified_redelegation() { + let mut storage = TestWlStorage::default(); + let validator1 = established_address_1(); + let validator2 = established_address_2(); + let owner = established_address_3(); + let outer_epoch = Epoch(0); + + let mut alice = validator1.clone(); + let mut bob = validator2.clone(); + + // Ensure a ranking order of alice > bob + // TODO: check why this needs to be > (am I just confusing myself?) 
+ if bob > alice { + alice = validator2; + bob = validator1; + } + println!("\n\nalice = {}\nbob = {}\n", &alice, &bob); + + // Fill redelegated bonds in storage + let redelegated_bonds_map = delegator_redelegated_bonds_handle(&owner) + .at(&alice) + .at(&outer_epoch); + redelegated_bonds_map + .at(&alice) + .insert(&mut storage, Epoch(2), token::Amount::from(6)) + .unwrap(); + redelegated_bonds_map + .at(&alice) + .insert(&mut storage, Epoch(4), token::Amount::from(7)) + .unwrap(); + redelegated_bonds_map + .at(&bob) + .insert(&mut storage, Epoch(1), token::Amount::from(5)) + .unwrap(); + redelegated_bonds_map + .at(&bob) + .insert(&mut storage, Epoch(4), token::Amount::from(7)) + .unwrap(); + + // Test cases 1 and 2 + let mr1 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(25), + ) + .unwrap(); + let mr2 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(30), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + ..Default::default() + }; + + assert_eq!(mr1, exp_mr); + assert_eq!(mr2, exp_mr); + + // Test case 3 + let mr3 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(7), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr3, exp_mr); + + // Test case 4 + let mr4 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(8), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(1), Epoch(4)]), + epoch_to_modify: 
Some(Epoch(1)), + new_amount: Some(4.into()), + }; + assert_eq!(mr4, exp_mr); + + // Test case 5 + let mr5 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 12.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + ..Default::default() + }; + assert_eq!(mr5, exp_mr); + + // Test case 6 + let mr6 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 14.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + epoch_to_modify: Some(Epoch(4)), + new_amount: Some(5.into()), + }; + assert_eq!(mr6, exp_mr); + + // Test case 7 + let mr7 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 19.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr7, exp_mr); + + // Test case 8 + let mr8 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 21.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob]), + validator_to_modify: Some(alice), + epochs_to_remove: BTreeSet::from_iter([Epoch(2), Epoch(4)]), + epoch_to_modify: Some(Epoch(2)), + new_amount: Some(4.into()), + }; + assert_eq!(mr8, exp_mr); +} + +/// `computeBondAtEpochTest` +#[test] +fn test_compute_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + pipeline_len: 2, + unbonding_len: 4, + 
cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + // Test 1 + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 2 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 3 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let mut redel_bonds = EagerRedelegatedBondsMap::default(); + redel_bonds.insert( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(5))]), + ); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 4 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 5 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 6.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 
23.into()); + + // Test 6 + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 18.into(), + 9.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 18.into()); +} + +/// `computeSlashBondAtEpochTest` +#[test] +fn test_compute_slash_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + pipeline_len: 2, + unbonding_len: 4, + cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + let current_epoch = Epoch(20); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let redelegated_bond = BTreeMap::from_iter([( + alice, + BTreeMap::from_iter([(infraction_epoch - 4, token::Amount::from(10))]), + )]); + + // Test 1 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 2 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 3 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + 
Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 4 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); +} + +/// `computeNewRedelegatedUnbondsTest` +#[test] +fn test_compute_new_redelegated_unbonds() { + let mut storage = TestWlStorage::default(); + let alice = established_address_1(); + let bob = established_address_2(); + + let key = Key::parse("testing").unwrap(); + let redelegated_bonds = NestedMap::::open(key); + + // Populate the lazy and eager maps + let (ep1, ep2, ep4, ep5, ep6, ep7) = + (Epoch(1), Epoch(2), Epoch(4), Epoch(5), Epoch(6), Epoch(7)); + let keys_and_values = vec![ + (ep5, alice.clone(), ep2, 1), + (ep5, alice.clone(), ep4, 1), + (ep7, alice.clone(), ep2, 1), + (ep7, alice.clone(), ep4, 1), + (ep5, bob.clone(), ep1, 1), + (ep5, bob.clone(), ep4, 2), + (ep7, bob.clone(), ep1, 1), + (ep7, bob.clone(), ep4, 2), + ]; + let mut eager_map = BTreeMap::::new(); + for (outer_ep, address, inner_ep, amount) in keys_and_values { + redelegated_bonds + .at(&outer_ep) + .at(&address) + .insert(&mut storage, inner_ep, token::Amount::from(amount)) + .unwrap(); + eager_map + .entry(outer_ep) + .or_default() + .entry(address.clone()) + .or_default() + .insert(inner_ep, token::Amount::from(amount)); + } + + // Different ModifiedRedelegation objects for testing + let empty_mr = ModifiedRedelegation::default(); + let all_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone()]), + 
validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1]), + epoch_to_modify: None, + new_amount: None, + }; + let mod_epoch_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice, bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1, ep4]), + epoch_to_modify: Some(ep4), + new_amount: Some(token::Amount::from(1)), + }; + + // Test case 1 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &Default::default(), + &empty_mr, + ) + .unwrap(); + assert_eq!(res, Default::default()); + + let set5 = BTreeSet::::from_iter([ep5]); + let set56 = BTreeSet::::from_iter([ep5, ep6]); + + // Test case 2 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set5, + &empty_mr, + ) + .unwrap(); + let mut exp_res = eager_map.clone(); + exp_res.remove(&ep7); + assert_eq!(res, exp_res); + + // Test case 3 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &empty_mr, + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + println!("\nTEST CASE 4\n"); + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &all_mr, + ) + .unwrap(); + assert_eq!(res, eager_map); + + // Test case 5 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_mr, + ) + .unwrap(); + exp_res = eager_map.clone(); + exp_res.entry(ep7).or_default().remove(&bob); + assert_eq!(res, exp_res); + + // Test case 6 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_partial_mr, + ) + .unwrap(); + exp_res = 
eager_map.clone(); + exp_res + .entry(ep7) + .or_default() + .entry(bob.clone()) + .or_default() + .remove(&ep4); + assert_eq!(res, exp_res); + + // Test case 7 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_epoch_partial_mr, + ) + .unwrap(); + exp_res + .entry(ep7) + .or_default() + .entry(bob) + .or_default() + .insert(ep4, token::Amount::from(1)); + assert_eq!(res, exp_res); +} + +/// `applyListSlashesTest` +#[test] +fn test_apply_list_slashes() { + let init_epoch = Epoch(2); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + // let unbonding_len = 4u64; + // let cubic_offset = 1u64; + + let slash1 = Slash { + epoch: init_epoch, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let list1 = vec![slash1.clone()]; + let list2 = vec![slash1.clone(), slash2.clone()]; + let list3 = vec![slash1.clone(), slash1.clone()]; + let list4 = vec![slash1.clone(), slash1, slash2]; + + let res = apply_list_slashes(¶ms, &[], token::Amount::from(100)); + assert_eq!(res, token::Amount::from(100)); + + let res = apply_list_slashes(¶ms, &list1, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list2, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list3, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list4, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); +} + +/// `computeSlashableAmountTest` +#[test] +fn test_compute_slashable_amount() { + let init_epoch = Epoch(2); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + let 
slash1 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_map = vec![(init_epoch, token::Amount::from(50))] + .into_iter() + .collect::>(); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &BTreeMap::new(), + ); + assert_eq!(res, token::Amount::from(100)); + + let res = compute_slashable_amount( + ¶ms, + &slash2, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(50)); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(100)); +} + +/// `foldAndSlashRedelegatedBondsMapTest` +#[test] +fn test_fold_and_slash_redelegated_bonds() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + let start_epoch = Epoch(7); + + let alice = established_address_1(); + let bob = established_address_2(); + + println!("\n\nAlice: {}", alice); + println!("Bob: {}\n", bob); + + let test_slash = Slash { + epoch: Default::default(), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_data = vec![ + (alice.clone(), vec![(2, 1), (4, 1)]), + (bob, vec![(1, 1), (4, 2)]), + ]; + let mut eager_redel_bonds = EagerRedelegatedBondsMap::default(); + for (address, pair) in test_data { + for (epoch, amount) in pair { + eager_redel_bonds + .entry(address.clone()) + .or_default() + .insert(Epoch(epoch), token::Amount::from(amount)); + } + } + + // Test case 1 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + 
start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(5), + } + ); + + // Test case 2 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[test_slash], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::zero(), + } + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(6), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(3), + } + ); +} + +/// `slashRedelegationTest` +#[test] +fn test_slash_redelegation() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + let alice = established_address_1(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + 
assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 2 + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(11), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 3 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 4 + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 5 + slashes.pop(&mut storage).unwrap(); + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(9), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + 
&total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 6 + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorRedelegationTest` +#[test] +fn test_slash_validator_redelegation() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let outgoing_redelegations = + validator_outgoing_redelegations_handle(&alice).at(&bob); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 2 + total_redelegated_unbonded + .remove_all(&mut storage, &Epoch(13)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + 
&total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 3 + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(6)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 4 + slashed_amounts_map = empty_slash_amounts.clone(); + outgoing_redelegations + .remove_all(&mut storage, &Epoch(6)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(7)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 5 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 6 + slashed_amounts_map = empty_slash_amounts.clone(); + slashes + .push( + &mut storage, 
+ Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorTest` +#[test] +fn test_slash_validator() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + let total_bonded = total_bonded_handle(&bob); + let total_unbonded = total_unbonded_handle(&bob); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&bob); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&bob); + + let infraction_stake = token::Amount::from(23); + + let initial_stakes = BTreeMap::from_iter([ + (Epoch(11), infraction_stake), + (Epoch(12), infraction_stake), + (Epoch(13), infraction_stake), + ]); + let mut exp_res = initial_stakes.clone(); + + let current_epoch = Epoch(10); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + let processing_epoch = current_epoch.next(); + let slash_rate = Dec::one(); + + // Test case 1 + println!("\nTEST 1:"); + + total_bonded + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 2 + println!("\nTEST 2:"); + total_bonded + .set(&mut storage, 17.into(), infraction_epoch - 2, 0) + .unwrap(); + total_unbonded + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 2, 6.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + 
slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 17.into()); + exp_res.insert(Epoch(13), 17.into()); + assert_eq!(res, exp_res); + + // Test case 3 + println!("\nTEST 3:"); + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + println!("\nTEST 4:"); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .remove(&mut storage, &(infraction_epoch - 2)) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 6.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 5 + println!("\nTEST 5:"); + total_bonded_handle(&bob) + .set(&mut storage, 19.into(), infraction_epoch - 2, 0) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 4.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, Epoch(2), token::Amount::from(1)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + 
.at(&infraction_epoch.prev()) + .at(&alice) + .remove(&mut storage, &Epoch(3)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 4.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 19.into()); + exp_res.insert(Epoch(13), 19.into()); + assert_eq!(res, exp_res); + + // Test case 6 + println!("\nTEST 6:"); + total_unbonded_handle(&bob) + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res = initial_stakes; + assert_eq!(res, exp_res); + + // Test case 7 + println!("\nTEST 7:"); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 6.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 8 + println!("\nTEST 8:"); + total_bonded + .get_data_handler() + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + 
assert_eq!(res, exp_res); + + // Test case 9 + println!("\nTEST 9:"); + total_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 10 + println!("\nTEST 10:"); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 11 + println!("\nTEST 11:"); + total_bonded + .set(&mut storage, 2.into(), current_epoch, 0) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 4.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .remove(&mut storage, &3.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + 
.unwrap(); + assert_eq!(res, exp_res); + + // Test case 12 + println!("\nTEST 12:"); + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_bonded + .set(&mut storage, 2.into(), current_epoch.next(), 0) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 13 + println!("\nTEST 13:"); + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(11), 0.into()); + exp_res.insert(Epoch(12), 0.into()); + exp_res.insert(Epoch(13), 0.into()); + assert_eq!(res, exp_res); +} + +/// `computeAmountAfterSlashingUnbondTest` +#[test] +fn compute_amount_after_slashing_unbond_test() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds: BTreeMap = BTreeMap::from_iter([ + ((Epoch(2)), token::Amount::from(5)), + ((Epoch(4)), token::Amount::from(6)), + ]); + let redelegated_unbonds: 
EagerRedelegatedUnbonds = BTreeMap::from_iter([( + Epoch(2), + BTreeMap::from_iter([( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + )]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, 
alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +/// `computeAmountAfterSlashingWithdrawTest` +#[test] +fn compute_amount_after_slashing_withdraw_test() { + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::from_iter([ + ( + (Epoch(2), Epoch(20)), + ( + // unbond + token::Amount::from(5), + // redelegations + BTreeMap::from_iter([( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + ), + ), + ( + (Epoch(4), Epoch(20)), + ( + // unbond + token::Amount::from(6), + // redelegations + BTreeMap::default(), + ), + ), + ]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + 
let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +fn arb_redelegation_amounts( + max_delegation: u64, +) -> impl Strategy { + let arb_delegation = arb_amount_non_zero_ceiled(max_delegation); + let amounts = arb_delegation.prop_flat_map(move |amount_delegate| { + let amount_redelegate = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_delegate.raw_amount()).unwrap() - 1, + )); + (Just(amount_delegate), amount_redelegate) + }); + amounts.prop_flat_map(move |(amount_delegate, amount_redelegate)| { + let amount_unbond = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_redelegate.raw_amount()).unwrap() - 1, + )); + ( + Just(amount_delegate), + Just(amount_redelegate), + amount_unbond, + ) + }) +} + +fn test_simple_redelegation_aux( + mut validators: Vec, + amount_delegate: 
token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + init_genesis( + &mut storage, + ¶ms, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Ensure that we cannot redelegate with the same src and dest validator + let err = super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &src_validator, + current_epoch, + amount_redelegate, + ) + .unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::RedelegationSrcEqDest, + "Redelegation with the same src and dest validator must be rejected, \ + got {err_str}", + ); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 1 to src_validator + println!( + "\nBONDING {} TOKENS TO {}\n", + amount_delegate.to_string_native(), + &src_validator + ); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + ) + .unwrap(); + + println!("\nAFTER DELEGATION\n"); + let bonds = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = 
bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 3 + println!( + "\nREDELEGATING {} TOKENS TO {}\n", + amount_redelegate.to_string_native(), + &dest_validator + ); + + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + println!("\nAFTER REDELEGATION\n"); + println!("\nDELEGATOR\n"); + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + // Dest val + 
println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Src val + println!("\nSRC VALIDATOR\n"); + + let incoming_redels_src = + validator_incoming_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_src = + validator_outgoing_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_src = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_src = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_src = + validator_total_redelegated_bonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_src = + validator_total_redelegated_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_src, + &outgoing_redels_src, + &tot_bonds_src, + &tot_unbonds_src, + &tot_redel_bonds_src, + &tot_redel_unbonds_src + ); + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + 
.at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Unbond in epoch 5 from dest_validator + println!( + "\nUNBONDING {} TOKENS FROM {}\n", + amount_unbond.to_string_native(), + &dest_validator + ); + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + println!("\nAFTER UNBONDING\n"); + println!("\nDELEGATOR\n"); + + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + 
&bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + dbg!(unbond_materialized, redelegation_end, bond_start); + let total_redel_unbonded = + 
validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!( + delegator_balance, + del_balance - amount_delegate + amount_unbond + ); +} + +fn test_redelegation_with_slashing_aux( + mut validators: Vec, + amount_delegate: token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + // Avoid empty consensus set by removing the threshold + validator_stake_threshold: token::Amount::zero(), + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + init_genesis( + &mut storage, + ¶ms, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, 
del_balance) + .unwrap(); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 5 to src_validator + println!( + "\nBONDING {} TOKENS TO {}\n", + amount_delegate.to_string_native(), + &src_validator + ); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + ) + .unwrap(); + + println!("\nAFTER DELEGATION\n"); + let bonds = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 8 + println!( + "\nREDELEGATING {} TOKENS TO {}\n", + amount_redelegate.to_string_native(), + &dest_validator + ); + + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + println!("\nAFTER REDELEGATION\n"); + println!("\nDELEGATOR\n"); + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = 
bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + // Dest val + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Src val + println!("\nSRC VALIDATOR\n"); + + let incoming_redels_src = + validator_incoming_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_src = + validator_outgoing_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_src = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let 
tot_unbonds_src = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_src = + validator_total_redelegated_bonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_src = + validator_total_redelegated_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_src, + &outgoing_redels_src, + &tot_bonds_src, + &tot_unbonds_src, + &tot_redel_bonds_src, + &tot_redel_unbonds_src + ); + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + .at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Unbond in epoch 11 from dest_validator + println!( + "\nUNBONDING {} TOKENS FROM {}\n", + amount_unbond.to_string_native(), + &dest_validator + ); + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + println!("\nAFTER UNBONDING\n"); + println!("\nDELEGATOR\n"); + + let bonds_src = 
bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Discover evidence + slash( + &mut storage, + ¶ms, + current_epoch, + init_epoch + 2 * params.pipeline_len, + 0u64, + SlashType::DuplicateVote, + &src_validator, + 
current_epoch.next(), + ) + .unwrap(); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + dbg!(unbond_materialized, redelegation_end, bond_start); + let total_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!(delegator_balance, del_balance - amount_delegate); +} + +fn test_chain_redelegations_aux(mut validators: Vec) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let 
src_validator = validators[0].address.clone(); + let _init_stake_src = validators[0].tokens; + let dest_validator = validators[1].address.clone(); + let _init_stake_dest = validators[1].tokens; + let dest_validator_2 = validators[2].address.clone(); + let _init_stake_dest_2 = validators[2].tokens; + + let mut storage = TestWlStorage::default(); + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + init_genesis( + &mut storage, + ¶ms, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Delegate in epoch 0 to src_validator + let bond_amount: token::Amount = 100.into(); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + bond_amount, + current_epoch, + ) + .unwrap(); + + let bond_start = current_epoch + params.pipeline_len; + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 1 to dest_validator + let redel_amount_1: token::Amount = 58.into(); + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + redel_amount_1, + ) + .unwrap(); + + let redel_start = current_epoch; + let redel_end = current_epoch + params.pipeline_len; + + // Checks ---------------- + + // Dest validator should have an incoming redelegation + let incoming_redelegation = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation, Some(redel_end)); + + // Src validator should have 
an outoging redelegation + let outgoing_redelegation = + validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation, Some(redel_amount_1)); + + // Delegator should have redelegated bonds + let del_total_redelegated_bonded = + delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(del_total_redelegated_bonded, redel_amount_1); + + // There should be delegator bonds for both src and dest validators + let bonded_src = bond_handle(&delegator, &src_validator); + let bonded_dest = bond_handle(&delegator, &dest_validator); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 + ); + + // The dest validator should have total redelegated bonded tokens + let dest_total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_total_redelegated_bonded, redel_amount_1); + + // The dest validator's total bonded should have an entry for the genesis + // bond and the redelegation + let dest_total_bonded = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + assert!( + dest_total_bonded.len() == 2 + && dest_total_bonded.contains_key(&Epoch::default()) + ); + assert_eq!( + dest_total_bonded + .get(&redel_end) + .cloned() + .unwrap_or_default(), + redel_amount_1 + ); + + // The src validator should have a total bonded entry for the original bond + // accounting for the redelegation + assert_eq!( + total_bonded_handle(&src_validator) + 
.get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + + // The src validator should have a total unbonded entry due to the + // redelegation + let src_total_unbonded = total_unbonded_handle(&src_validator) + .at(&redel_end) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(src_total_unbonded, redel_amount_1); + + // Attempt to redelegate in epoch 3 to dest_validator + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + let redel_amount_2: token::Amount = 23.into(); + let redel_att = super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance to right before the redelegation can be redelegated again + assert_eq!(redel_end, current_epoch); + let epoch_can_redel = + redel_end.prev() + params.slash_processing_epoch_offset(); + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == epoch_can_redel.prev() { + break; + } + } + + // Attempt to redelegate in epoch before we actually are able to + let redel_att = super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance one more epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate from dest_validator to dest_validator_2 now + super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ) + .unwrap(); + + let redel_2_start = current_epoch; + let redel_2_end = current_epoch + 
params.pipeline_len; + + // Checks ----------------------------------- + + // Both the dest validator and dest validator 2 should have incoming + // redelegations + let incoming_redelegation_1 = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_1, Some(redel_end)); + let incoming_redelegation_2 = + validator_incoming_redelegations_handle(&dest_validator_2) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_2, Some(redel_2_end)); + + // Both the src validator and dest validator should have outgoing + // redelegations + let outgoing_redelegation_1 = + validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation_1, Some(redel_amount_1)); + + let outgoing_redelegation_2 = + validator_outgoing_redelegations_handle(&dest_validator) + .at(&dest_validator_2) + .at(&redel_end) + .get(&storage, &redel_2_start) + .unwrap(); + assert_eq!(outgoing_redelegation_2, Some(redel_amount_2)); + + // All three validators should have bonds + let bonded_dest2 = bond_handle(&delegator, &dest_validator_2); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 - redel_amount_2 + ); + assert_eq!( + bonded_dest2 + .get_delta_val(&storage, redel_2_end) + .unwrap() + .unwrap_or_default(), + redel_amount_2 + ); + + // There should be no unbond entries + let unbond_src = unbond_handle(&delegator, &src_validator); + let unbond_dest = unbond_handle(&delegator, &dest_validator); + assert!(unbond_src.is_empty(&storage).unwrap()); + assert!(unbond_dest.is_empty(&storage).unwrap()); + + // The dest validator should have some total unbonded due to the second + // redelegation + 
let dest_total_unbonded = total_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .get(&storage, &redel_end) + .unwrap(); + assert_eq!(dest_total_unbonded, Some(redel_amount_2)); + + // Delegator should have redelegated bonds due to both redelegations + let del_redelegated_bonds = delegator_redelegated_bonds_handle(&delegator); + assert_eq!( + Some(redel_amount_1 - redel_amount_2), + del_redelegated_bonds + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + ); + assert_eq!( + Some(redel_amount_2), + del_redelegated_bonds + .at(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + ); + + // Delegator redelegated unbonds should be empty + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .is_empty(&storage) + .unwrap() + ); + + // Both the dest validator and dest validator 2 should have total + // redelegated bonds + let dest_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + let dest2_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_redelegated_bonded, redel_amount_1 - redel_amount_2); + assert_eq!(dest2_redelegated_bonded, redel_amount_2); + + // Total redelegated unbonded should be empty for src_validator and + // dest_validator_2 + assert!( + validator_total_redelegated_unbonded_handle(&dest_validator_2) + .is_empty(&storage) + .unwrap() + ); + assert!( + validator_total_redelegated_unbonded_handle(&src_validator) + .is_empty(&storage) + .unwrap() + ); + + // The dest_validator should have total_redelegated unbonded + let tot_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .at(&redel_end) + 
.at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(tot_redel_unbonded, redel_amount_2); +} + +/// SM test case 1 from Brent +#[test] +fn test_from_sm_case_1() { + use namada_core::types::address::testing::established_address_4; + + let mut storage = TestWlStorage::default(); + let validator = established_address_1(); + let redeleg_src_1 = established_address_2(); + let redeleg_src_2 = established_address_3(); + let owner = established_address_4(); + let unbond_amount = token::Amount::from(3130688); + println!( + "Owner: {owner}\nValidator: {validator}\nRedeleg src 1: \ + {redeleg_src_1}\nRedeleg src 2: {redeleg_src_2}" + ); + + // Validator's incoming redelegations + let outer_epoch_1 = Epoch(27); + // from redeleg_src_1 + let epoch_1_redeleg_1 = token::Amount::from(8516); + // from redeleg_src_2 + let epoch_1_redeleg_2 = token::Amount::from(5704386); + let outer_epoch_2 = Epoch(30); + // from redeleg_src_2 + let epoch_2_redeleg_2 = token::Amount::from(1035191); + + // Insert the data - bonds and redelegated bonds + let bonds_handle = bond_handle(&owner, &validator); + bonds_handle + .add( + &mut storage, + epoch_1_redeleg_1 + epoch_1_redeleg_2, + outer_epoch_1, + 0, + ) + .unwrap(); + bonds_handle + .add(&mut storage, epoch_2_redeleg_2, outer_epoch_2, 0) + .unwrap(); + + let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_1); + redelegated_bonds_map_1 + .at(&redeleg_src_1) + .insert(&mut storage, Epoch(14), epoch_1_redeleg_1) + .unwrap(); + redelegated_bonds_map_1 + .at(&redeleg_src_2) + .insert(&mut storage, Epoch(18), epoch_1_redeleg_2) + .unwrap(); + let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_1); + + let redelegated_bonds_map_2 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_2); + redelegated_bonds_map_2 + .at(&redeleg_src_2) + .insert(&mut storage, 
Epoch(18), epoch_2_redeleg_2) + .unwrap(); + + // Find the modified redelegation the same way as `unbond_tokens` + let bonds_to_unbond = find_bonds_to_remove( + &storage, + &bonds_handle.get_data_handler(), + unbond_amount, + ) + .unwrap(); + dbg!(&bonds_to_unbond); + + let (new_entry_epoch, new_bond_amount) = bonds_to_unbond.new_entry.unwrap(); + assert_eq!(outer_epoch_1, new_entry_epoch); + // The modified bond should be sum of all redelegations less the unbonded + // amouunt + assert_eq!( + epoch_1_redeleg_1 + epoch_1_redeleg_2 + epoch_2_redeleg_2 + - unbond_amount, + new_bond_amount + ); + // The current bond should be sum of redelegations fom the modified epoch + let cur_bond_amount = bonds_handle + .get_delta_val(&storage, new_entry_epoch) + .unwrap() + .unwrap_or_default(); + assert_eq!(epoch_1_redeleg_1 + epoch_1_redeleg_2, cur_bond_amount); + + let mr = compute_modified_redelegation( + &storage, + &redelegated_bonds_map_1, + new_entry_epoch, + cur_bond_amount - new_bond_amount, + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(27)), + validators_to_remove: BTreeSet::from_iter([redeleg_src_2.clone()]), + validator_to_modify: Some(redeleg_src_2), + epochs_to_remove: BTreeSet::from_iter([Epoch(18)]), + epoch_to_modify: Some(Epoch(18)), + new_amount: Some(token::Amount::from(3608889)), + }; + + pretty_assertions::assert_eq!(mr, exp_mr); +} + +/// Test precisely that we are not overslashing, as originally discovered by Tomas in this issue: https://github.com/informalsystems/partnership-heliax/issues/74 +fn test_overslashing_aux(mut validators: Vec) { + assert_eq!(validators.len(), 4); + + let params = PosParams { + unbonding_len: 4, + ..Default::default() + }; + + let offending_stake = token::Amount::native_whole(110); + let other_stake = token::Amount::native_whole(100); + + // Set stakes so we know we will get a slashing rate between 0.5 -1.0 + validators[0].tokens = offending_stake; + validators[1].tokens = other_stake; + 
validators[2].tokens = other_stake; + validators[3].tokens = other_stake; + + // Get the offending validator + let validator = validators[0].address.clone(); + + println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); + let mut storage = TestWlStorage::default(); + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + init_genesis( + &mut storage, + ¶ms, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = storage.storage.native_token.clone(); + let delegator = address::testing::gen_implicit_address(); + let amount_del = token::Amount::native_whole(5); + credit_tokens(&mut storage, &staking_token, &delegator, amount_del) + .unwrap(); + + // Delegate tokens in epoch 0 to validator + bond_tokens( + &mut storage, + Some(&delegator), + &validator, + amount_del, + current_epoch, + ) + .unwrap(); + + let self_bond_epoch = current_epoch; + let delegation_epoch = current_epoch + params.pipeline_len; + + // Advance to pipeline epoch + for _ in 0..params.pipeline_len { + current_epoch = advance_epoch(&mut storage, ¶ms); + } + assert_eq!(delegation_epoch, current_epoch); + + // Find a misbehavior committed in epoch 0 + slash( + &mut storage, + ¶ms, + current_epoch, + self_bond_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + // Find a misbehavior committed in current epoch + slash( + &mut storage, + ¶ms, + current_epoch, + delegation_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + let processing_epoch_1 = + self_bond_epoch + params.slash_processing_epoch_offset(); + let processing_epoch_2 = + delegation_epoch + params.slash_processing_epoch_offset(); + + // Advance to processing epoch 1 + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if 
current_epoch == processing_epoch_1 { + break; + } + } + + let total_stake_1 = offending_stake + 3 * other_stake; + let stake_frac = Dec::from(offending_stake) / Dec::from(total_stake_1); + let slash_rate_1 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + dbg!(&slash_rate_1); + + let exp_slashed_1 = offending_stake.mul_ceil(slash_rate_1); + + // Check that the proper amount was slashed + let epoch = current_epoch.next(); + let validator_stake = + read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); + let exp_validator_stake = offending_stake - exp_slashed_1 + amount_del; + assert_eq!(validator_stake, exp_validator_stake); + + let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); + let exp_total_stake = + offending_stake - exp_slashed_1 + amount_del + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let self_bond_id = BondId { + source: validator.clone(), + validator: validator.clone(), + }; + let bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = offending_stake - exp_slashed_1; + assert_eq!(bond_amount, exp_bond_amount); + + // Advance to processing epoch 2 + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == processing_epoch_2 { + break; + } + } + + let total_stake_2 = offending_stake + amount_del + 3 * other_stake; + let stake_frac = + Dec::from(offending_stake + amount_del) / Dec::from(total_stake_2); + let slash_rate_2 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + dbg!(&slash_rate_2); + + let exp_slashed_from_delegation = amount_del.mul_ceil(slash_rate_2); + + // Check that the proper amount was slashed. We expect that all of the + // validator self-bond has been slashed and some of the delegation has been + // slashed due to the second infraction. 
+ let epoch = current_epoch.next(); + + let validator_stake = + read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); + let exp_validator_stake = amount_del - exp_slashed_from_delegation; + assert_eq!(validator_stake, exp_validator_stake); + + let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); + let exp_total_stake = + amount_del - exp_slashed_from_delegation + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let delegation_id = BondId { + source: delegator.clone(), + validator: validator.clone(), + }; + let delegation_amount = + crate::bond_amount(&storage, &delegation_id, epoch).unwrap(); + let exp_del_amount = amount_del - exp_slashed_from_delegation; + assert_eq!(delegation_amount, exp_del_amount); + + let self_bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = token::Amount::zero(); + assert_eq!(self_bond_amount, exp_bond_amount); +} diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index 6c9968c519..e9c4db1b3a 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -2,10 +2,14 @@ use std::cmp; use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::ops::Deref; +use assert_matches::assert_matches; use itertools::Itertools; use namada_core::ledger::storage::testing::TestWlStorage; -use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey; +use namada_core::ledger::storage_api::collections::lazy_map::{ + Collectable, NestedSubKey, SubKey, +}; use namada_core::ledger::storage_api::token::read_balance; use namada_core::ledger::storage_api::{token, StorageRead}; use namada_core::types::address::{self, Address}; @@ -27,27 +31,72 @@ use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; use crate::tests::arb_params_and_genesis_validators; use crate::types::{ - BondId, GenesisValidator, ReverseOrdTokenAmount, Slash, 
SlashType, - SlashedAmount, ValidatorState, WeightedValidator, + BondId, EagerRedelegatedBondsMap, GenesisValidator, ReverseOrdTokenAmount, + Slash, SlashType, ValidatorState, WeightedValidator, }; use crate::{ below_capacity_validator_set_handle, consensus_validator_set_handle, enqueued_slashes_handle, read_below_threshold_validator_set_addresses, - read_pos_params, validator_deltas_handle, validator_slashes_handle, - validator_state_handle, + read_pos_params, redelegate_tokens, validator_deltas_handle, + validator_slashes_handle, validator_state_handle, BondsForRemovalRes, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, + RedelegationError, ResultSlashing, }; prop_state_machine! { #![proptest_config(Config { cases: 2, - verbose: 1, .. Config::default() })] #[test] /// A `StateMachineTest` implemented on `PosState` - fn pos_state_machine_test(sequential 200 => ConcretePosState); + fn pos_state_machine_test(sequential 500 => ConcretePosState); } +type AbstractDelegatorRedelegatedBonded = BTreeMap< + Address, + BTreeMap< + Address, + BTreeMap>>, + >, +>; + +type AbstractDelegatorRedelegatedUnbonded = BTreeMap< + Address, + BTreeMap< + Address, + BTreeMap< + (Epoch, Epoch), + BTreeMap>, + >, + >, +>; + +type AbstractValidatorTotalRedelegatedBonded = BTreeMap< + Address, + BTreeMap>>, +>; + +type AbstractTotalRedelegatedUnbonded = BTreeMap< + Epoch, + BTreeMap>>, +>; + +type AbstractValidatorTotalRedelegatedUnbonded = BTreeMap< + Address, + BTreeMap< + Epoch, + BTreeMap>>, + >, +>; + +type AbstractIncomingRedelegations = + BTreeMap>; +type AbstractOutgoingRedelegations = BTreeMap< + Address, + BTreeMap>, +>; + /// Abstract representation of a state of PoS system #[derive(Clone, Debug)] struct AbstractPosState { @@ -59,13 +108,13 @@ struct AbstractPosState { genesis_validators: Vec, /// Bonds delta values. 
The outer key for Epoch is pipeline offset from /// epoch in which the bond is applied - bonds: BTreeMap>, + bonds: BTreeMap>, /// Total bonded tokens to a validator in each epoch. This is never /// decremented and used for slashing computations. - total_bonded: BTreeMap>, + total_bonded: BTreeMap>, /// Validator stakes. These are NOT deltas. /// Pipelined. - validator_stakes: BTreeMap>, + validator_stakes: BTreeMap>, /// Consensus validator set. Pipelined. consensus_set: BTreeMap>>, /// Below-capacity validator set. Pipelined. @@ -75,20 +124,30 @@ struct AbstractPosState { below_threshold_set: BTreeMap>, /// Validator states. Pipelined. validator_states: BTreeMap>, - /// Unbonded bonds. The outer key for Epoch is pipeline + unbonding offset - /// from epoch in which the unbond is applied. - unbonds: BTreeMap>, + /// Unbonded bonds. The outer key for Epoch is pipeline + unbonding + + /// cubic_window offset from epoch in which the unbond transition + /// occurs. + unbonds: BTreeMap<(Epoch, Epoch), BTreeMap>, /// Validator slashes post-processing validator_slashes: BTreeMap>, /// Enqueued slashes pre-processing enqueued_slashes: BTreeMap>>, /// The last epoch in which a validator committed an infraction validator_last_slash_epochs: BTreeMap, - /// Unbond records required for slashing. + /// Validator's total unbonded required for slashing. /// Inner `Epoch` is the epoch in which the unbond became active. /// Outer `Epoch` is the epoch in which the underlying bond became active. - unbond_records: + total_unbonded: BTreeMap>>, + /// The outer key is the epoch in which redelegation became active + /// (pipeline offset). The next key is the address of the delegator. 
+ delegator_redelegated_bonded: AbstractDelegatorRedelegatedBonded, + delegator_redelegated_unbonded: AbstractDelegatorRedelegatedUnbonded, + validator_total_redelegated_bonded: AbstractValidatorTotalRedelegatedBonded, + validator_total_redelegated_unbonded: + AbstractValidatorTotalRedelegatedUnbonded, + incoming_redelegations: AbstractIncomingRedelegations, + outgoing_redelegations: AbstractOutgoingRedelegations, } /// The PoS system under test @@ -122,6 +181,13 @@ enum Transition { Withdraw { id: BondId, }, + Redelegate { + /// A chained redelegation must fail + is_chained: bool, + id: BondId, + new_validator: Address, + amount: token::Amount, + }, Misbehavior { address: Address, slash_type: SlashType, @@ -140,9 +206,8 @@ impl StateMachineTest for ConcretePosState { fn init_test( initial_state: &::State, ) -> Self::SystemUnderTest { - println!(); - println!("New test case"); - println!( + tracing::debug!("New test case"); + tracing::debug!( "Genesis validators: {:#?}", initial_state .genesis_validators @@ -163,7 +228,7 @@ impl StateMachineTest for ConcretePosState { fn apply( mut state: Self::SystemUnderTest, - _ref_state: &::State, + ref_state: &::State, transition: ::Transition, ) -> Self::SystemUnderTest { let params = crate::read_pos_params(&state.s).unwrap(); @@ -173,10 +238,10 @@ impl StateMachineTest for ConcretePosState { &crate::ADDRESS, ) .unwrap(); - println!("PoS balance: {}", pos_balance.to_string_native()); + tracing::debug!("PoS balance: {}", pos_balance.to_string_native()); match transition { Transition::NextEpoch => { - println!("\nCONCRETE Next epoch"); + tracing::debug!("\nCONCRETE Next epoch"); super::advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing @@ -194,7 +259,7 @@ impl StateMachineTest for ConcretePosState { commission_rate, max_commission_rate_change, } => { - println!("\nCONCRETE Init validator"); + tracing::debug!("\nCONCRETE Init validator"); let current_epoch = state.current_epoch(); 
super::become_validator(super::BecomeValidator { @@ -218,7 +283,7 @@ impl StateMachineTest for ConcretePosState { ) } Transition::Bond { id, amount } => { - println!("\nCONCRETE Bond"); + tracing::debug!("\nCONCRETE Bond"); let current_epoch = state.current_epoch(); let pipeline = current_epoch + params.pipeline_len; let validator_stake_before_bond_cur = @@ -228,8 +293,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let validator_stake_before_bond_pipeline = crate::read_validator_stake( &state.s, @@ -237,8 +301,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Credit tokens to ensure we can apply the bond let native_token = state.s.get_native_token().unwrap(); @@ -299,9 +362,11 @@ impl StateMachineTest for ConcretePosState { pos_balance_post - pos_balance_pre, src_balance_pre - src_balance_post ); + + state.check_multistate_bond_post_conditions(ref_state, &id); } Transition::Unbond { id, amount } => { - println!("\nCONCRETE Unbond"); + tracing::debug!("\nCONCRETE Unbond"); let current_epoch = state.current_epoch(); let pipeline = current_epoch + params.pipeline_len; let native_token = state.s.get_native_token().unwrap(); @@ -319,8 +384,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let validator_stake_before_unbond_pipeline = crate::read_validator_stake( &state.s, @@ -328,8 +392,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Apply the unbond super::unbond_tokens( @@ -338,6 +401,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, amount, current_epoch, + false, ) .unwrap(); @@ -361,11 +425,13 @@ impl StateMachineTest for ConcretePosState { assert_eq!(pos_balance_pre, pos_balance_post); // Post-condition: Source balance should not 
change assert_eq!(src_balance_post, src_balance_pre); + + state.check_multistate_unbond_post_conditions(ref_state, &id); } Transition::Withdraw { id: BondId { source, validator }, } => { - println!("\nCONCRETE Withdraw"); + tracing::debug!("\nCONCRETE Withdraw"); let current_epoch = state.current_epoch(); let native_token = state.s.get_native_token().unwrap(); let pos = address::POS; @@ -411,6 +477,218 @@ impl StateMachineTest for ConcretePosState { // Post-condition: The increment in source balance should be // equal to the withdrawn amount assert_eq!(src_balance_post - src_balance_pre, withdrawn); + + state.check_multistate_withdraw_post_conditions( + ref_state, + &BondId { source, validator }, + ); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!("\nCONCRETE Redelegate"); + + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + + // Read data prior to applying the transition + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + let slash_pool = address::POS_SLASH_POOL; + let slash_balance_pre = + token::read_balance(&state.s, &native_token, &slash_pool) + .unwrap(); + + // Read src validator stakes + let src_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let _src_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Read dest validator stakes + let dest_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + let _dest_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + + // Find delegations + let delegations_pre = + 
crate::find_delegations(&state.s, &id.source, &pipeline) + .unwrap(); + + // Apply redelegation + let result = redelegate_tokens( + &mut state.s, + &id.source, + &id.validator, + &new_validator, + current_epoch, + amount, + ); + + state.check_multistate_redelegation_post_conditions( + ref_state, + &id.source, + &id.validator, + &new_validator, + ); + + if is_chained && !amount.is_zero() { + assert!(result.is_err()); + let err = result.unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::IsChainedRedelegation, + "A chained redelegation must be rejected, got \ + {err_str}", + ); + } else { + result.unwrap(); + + // Post-condition: PoS balance is unchanged + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos) + .unwrap(); + assert_eq!(pos_balance_pre, pos_balance_post); + + // Find slash pool balance difference + let slash_balance_post = token::read_balance( + &state.s, + &native_token, + &slash_pool, + ) + .unwrap(); + let slashed = slash_balance_post - slash_balance_pre; + + // Post-condition: Source validator stake at current epoch + // is unchanged + let src_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + src_validator_stake_cur_pre, + src_validator_stake_cur_post + ); + + // Post-condition: Source validator stake at pipeline epoch + // is reduced by the redelegation amount + + // TODO: shouldn't this be reduced by the redelegation + // amount post-slashing tho? 
+ // NOTE: We changed it to reduce it, check again later + let _amount_after_slash = amount - slashed; + let _src_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + // assert_eq!( + // src_validator_stake_pipeline_pre - + // amount_after_slash, + // src_validator_stake_pipeline_post + // ); + + // Post-condition: Destination validator stake at current + // epoch is unchanged + let dest_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + dest_validator_stake_cur_pre, + dest_validator_stake_cur_post + ); + + // Post-condition: Destination validator stake at pipeline + // epoch is increased by the redelegation amount, less any + // slashes + let _dest_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + // assert_eq!( + // dest_validator_stake_pipeline_pre + + // amount_after_slash, + // dest_validator_stake_pipeline_post + // ); + + // Post-condition: The delegator's delegations should be + // updated with redelegation. For the source reduced by the + // redelegation amount and for the destination increased by + // the redelegation amount, less any slashes. 
+ let delegations_post = crate::find_delegations( + &state.s, &id.source, &pipeline, + ) + .unwrap(); + let src_delegation_pre = delegations_pre + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let src_delegation_post = delegations_post + .get(&id.validator) + .cloned() + .unwrap_or_default(); + assert_eq!( + src_delegation_pre - src_delegation_post, + amount + ); + let _dest_delegation_pre = delegations_pre + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let _dest_delegation_post = delegations_post + .get(&new_validator) + .cloned() + .unwrap_or_default(); + // assert_eq!( + // dest_delegation_post - dest_delegation_pre, + // amount_after_slash + // ); + } } Transition::Misbehavior { address, @@ -418,7 +696,7 @@ impl StateMachineTest for ConcretePosState { infraction_epoch, height, } => { - println!("\nCONCRETE Misbehavior"); + tracing::debug!("\nCONCRETE Misbehavior"); let current_epoch = state.current_epoch(); // Record the slash evidence super::slash( @@ -443,10 +721,10 @@ impl StateMachineTest for ConcretePosState { &address, ); - // TODO: Any others? 
+ state.check_multistate_misbehavior_post_conditions(ref_state); } Transition::UnjailValidator { address } => { - println!("\nCONCRETE UnjailValidator"); + tracing::debug!("\nCONCRETE UnjailValidator"); let current_epoch = state.current_epoch(); // Unjail the validator @@ -566,8 +844,7 @@ impl ConcretePosState { &id.validator, submit_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the current epoch should not // change @@ -579,8 +856,7 @@ impl ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the pipeline should be // incremented by the bond amount @@ -597,6 +873,29 @@ impl ConcretePosState { ); } + fn check_multistate_bond_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the bonds are the same + let abs_bonds = ref_state.bonds.get(id).cloned().unwrap(); + let conc_bonds = crate::bond_handle(&id.source, &id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_bonds, conc_bonds); + + // Check that the total bonded is the same + let abs_tot_bonded = + ref_state.total_bonded.get(&id.validator).cloned().unwrap(); + let conc_tot_bonded = crate::total_bonded_handle(&id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_tot_bonded, conc_tot_bonded); + } + fn check_unbond_post_conditions( &self, submit_epoch: Epoch, @@ -614,8 +913,7 @@ impl ConcretePosState { &id.validator, submit_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the current epoch should not // change @@ -627,8 +925,7 @@ impl ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the pipeline should be // decremented at most by the bond amount (because slashing can reduce @@ -651,6 +948,172 @@ impl ConcretePosState { ); } + fn 
check_multistate_unbond_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the bonds are the same + let abs_bonds = ref_state.bonds.get(id).cloned().unwrap(); + let conc_bonds = crate::bond_handle(&id.source, &id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_bonds, conc_bonds); + + // Check that the total bonded is the same + let abs_tot_bonded = + ref_state.total_bonded.get(&id.validator).cloned().unwrap(); + let conc_tot_bonded = crate::total_bonded_handle(&id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_tot_bonded, conc_tot_bonded); + + // Check that the unbonds are the same + let mut abs_unbonds: BTreeMap> = + BTreeMap::new(); + ref_state.unbonds.iter().for_each( + |((start_epoch, withdraw_epoch), inner)| { + let amount = inner.get(id).cloned().unwrap_or_default(); + if !amount.is_zero() { + abs_unbonds + .entry(*start_epoch) + .or_default() + .insert(*withdraw_epoch, amount); + } + }, + ); + let conc_unbonds = crate::unbond_handle(&id.source, &id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_unbonds, conc_unbonds); + + // Check that the total_unbonded are the same + // TODO: figure out how we get entries with 0 amount in the + // abstract version (and prevent) + let mut abs_total_unbonded = ref_state + .total_unbonded + .get(&id.validator) + .cloned() + .unwrap(); + abs_total_unbonded.retain(|_, inner_map| { + inner_map.retain(|_, value| !value.is_zero()); + !inner_map.is_empty() + }); + let conc_total_unbonded = crate::total_unbonded_handle(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_total_unbonded, conc_total_unbonded); + + // Check that the delegator redelegated bonds are the same + let abs_del_redel_bonds = ref_state + .delegator_redelegated_bonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let conc_del_redel_bonds = + 
crate::delegator_redelegated_bonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds, conc_del_redel_bonds); + + // Check that the delegator redelegated unbonds are the same + #[allow(clippy::type_complexity)] + let mut abs_del_redel_unbonds: BTreeMap< + Epoch, + BTreeMap>>, + > = BTreeMap::new(); + ref_state + .delegator_redelegated_unbonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|((redel_end_epoch, withdraw_epoch), inner)| { + let abs_map = abs_del_redel_unbonds + .entry(*redel_end_epoch) + .or_default() + .entry(*withdraw_epoch) + .or_default(); + for (src, bonds) in inner { + for (start, amount) in bonds { + abs_map + .entry(src.clone()) + .or_default() + .insert(*start, *amount); + } + } + }); + let conc_del_redel_unbonds = + crate::delegator_redelegated_unbonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_unbonds, conc_del_redel_unbonds); + + // Check the validator total redelegated bonded + let abs_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let mut conc_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(&id.validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_total_redel_bonded, conc_total_redel_bonded); + + // Check the validator total redelegated unbonded + let mut abs_total_redel_unbonded = ref_state + 
.validator_total_redelegated_unbonded + .get(&id.validator) + .cloned() + .unwrap_or_default(); + abs_total_redel_unbonded.retain(|_, inner1| { + inner1.retain(|_, inner2| { + inner2.retain(|_, inner3| !inner3.is_empty()); + !inner2.is_empty() + }); + !inner1.is_empty() + }); + + let conc_total_redel_unbonded = + crate::validator_total_redelegated_unbonded_handle(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_total_redel_unbonded, conc_total_redel_unbonded); + } + /// These post-conditions apply to bonding and unbonding fn check_bond_and_unbond_post_conditions( &self, @@ -760,6 +1223,68 @@ impl ConcretePosState { } } + fn check_multistate_withdraw_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the unbonds are the same + let mut abs_unbonds: BTreeMap> = + BTreeMap::new(); + ref_state.unbonds.iter().for_each( + |((start_epoch, withdraw_epoch), inner)| { + let amount = inner.get(id).cloned().unwrap_or_default(); + if !amount.is_zero() { + abs_unbonds + .entry(*start_epoch) + .or_default() + .insert(*withdraw_epoch, amount); + } + }, + ); + let conc_unbonds = crate::unbond_handle(&id.source, &id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_unbonds, conc_unbonds); + + // Check the delegator redelegated unbonds + #[allow(clippy::type_complexity)] + let mut abs_del_redel_unbonds: BTreeMap< + Epoch, + BTreeMap>>, + > = BTreeMap::new(); + ref_state + .delegator_redelegated_unbonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|((redel_end_epoch, withdraw_epoch), inner)| { + let abs_map = abs_del_redel_unbonds + .entry(*redel_end_epoch) + .or_default() + .entry(*withdraw_epoch) + .or_default(); + for (src, bonds) in inner { + for (start, amount) in bonds { + abs_map + .entry(src.clone()) + .or_default() + .insert(*start, *amount); + } + } + }); + let conc_del_redel_unbonds = + 
crate::delegator_redelegated_unbonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_unbonds, conc_del_redel_unbonds); + } + fn check_init_validator_post_conditions( &self, submit_epoch: Epoch, @@ -823,7 +1348,7 @@ impl ConcretePosState { slash_type: SlashType, validator: &Address, ) { - println!( + tracing::debug!( "\nChecking misbehavior post conditions for validator: \n{}", validator ); @@ -831,13 +1356,6 @@ impl ConcretePosState { // Validator state jailed and validator removed from the consensus set // starting at the next epoch for offset in 1..=params.pipeline_len { - // dbg!( - // crate::read_consensus_validator_set_addresses_with_stake( - // &self.s, - // current_epoch + offset - // ) - // .unwrap() - // ); assert_eq!( validator_state_handle(validator) .get(&self.s, current_epoch + offset, params) @@ -850,7 +1368,6 @@ impl ConcretePosState { .unwrap() .any(|res| { let (_, val_address) = res.unwrap(); - // dbg!(&val_address); val_address == validator.clone() }); assert!(!in_consensus); @@ -877,6 +1394,40 @@ impl ConcretePosState { // TODO: Any others? 
} + fn check_multistate_misbehavior_post_conditions( + &self, + ref_state: &AbstractPosState, + ) { + // Check the enqueued slashes + let abs_enqueued = ref_state.enqueued_slashes.clone(); + let mut conc_enqueued: BTreeMap>> = + BTreeMap::new(); + crate::enqueued_slashes_handle() + .get_data_handler() + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: epoch, + nested_sub_key: + NestedSubKey::Data { + key: address, + nested_sub_key: _, + }, + }, + slash, + ) = res.unwrap(); + let slashes = conc_enqueued + .entry(epoch) + .or_default() + .entry(address) + .or_default(); + slashes.push(slash); + }); + assert_eq!(abs_enqueued, conc_enqueued); + } + fn check_unjail_validator_post_conditions( &self, params: &PosParams, @@ -950,58 +1501,299 @@ impl ConcretePosState { ); } - fn check_global_post_conditions( + fn check_multistate_redelegation_post_conditions( &self, - params: &PosParams, - current_epoch: Epoch, ref_state: &AbstractPosState, + delegator: &Address, + src_validator: &Address, + dest_validator: &Address, ) { - // Ensure that every validator in each set has the proper state - for epoch in Epoch::iter_bounds_inclusive( - current_epoch, - current_epoch + params.pipeline_len, - ) { - tracing::debug!("Epoch {epoch}"); - let mut vals = HashSet::
::new(); - for WeightedValidator { - bonded_stake, - address: validator, - } in crate::read_consensus_validator_set_addresses_with_stake( - &self.s, epoch, - ) - .unwrap() - { - let deltas_stake = validator_deltas_handle(&validator) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); - tracing::debug!( - "Consensus val {}, stake: {} ({})", - &validator, - bonded_stake.to_string_native(), - deltas_stake.to_string_native(), - ); - assert!(!deltas_stake.is_negative()); - assert_eq!( - bonded_stake, - token::Amount::from_change(deltas_stake) - ); - assert_eq!( - bonded_stake.change(), - ref_state - .validator_stakes - .get(&epoch) - .unwrap() - .get(&validator) - .cloned() - .unwrap() - ); + let src_id = BondId { + source: delegator.clone(), + validator: src_validator.clone(), + }; + let dest_id = BondId { + source: delegator.clone(), + validator: dest_validator.clone(), + }; - let state = crate::validator_state_handle(&validator) - .get(&self.s, epoch, params) - .unwrap(); + // Check the src bonds + let abs_src_bonds = + ref_state.bonds.get(&src_id).cloned().unwrap_or_default(); + let conc_src_bonds = crate::bond_handle(delegator, src_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_bonds, conc_src_bonds); + + // Check the dest bonds + let abs_dest_bonds = + ref_state.bonds.get(&dest_id).cloned().unwrap_or_default(); + let conc_dest_bonds = crate::bond_handle(delegator, dest_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_bonds, conc_dest_bonds); - assert_eq!(state, Some(ValidatorState::Consensus)); + // Check the src total bonded + let abs_src_tot_bonded = ref_state + .total_bonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + let conc_src_tot_bonded = crate::total_bonded_handle(src_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_tot_bonded, conc_src_tot_bonded); + + // Check the dest total bonded + let 
abs_dest_tot_bonded = ref_state + .total_bonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_dest_tot_bonded = crate::total_bonded_handle(dest_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_tot_bonded, conc_dest_tot_bonded); + + // NOTE: Unbonds are not updated by redelegation + + // Check the src total_unbonded + let mut abs_src_total_unbonded = ref_state + .total_unbonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + abs_src_total_unbonded.retain(|_, inner_map| { + inner_map.retain(|_, value| !value.is_zero()); + !inner_map.is_empty() + }); + let conc_src_total_unbonded = + crate::total_unbonded_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_total_unbonded, conc_src_total_unbonded); + + // Check the delegator redelegated bonds to the src + let abs_del_redel_bonds_src = ref_state + .delegator_redelegated_bonded + .get(delegator) + .cloned() + .unwrap_or_default() + .get(src_validator) + .cloned() + .unwrap_or_default(); + let conc_del_redel_bonds_src = + crate::delegator_redelegated_bonds_handle(delegator) + .at(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds_src, conc_del_redel_bonds_src); + + // Check the delegator redelegated bonds to the dest + let abs_del_redel_bonds_dest = ref_state + .delegator_redelegated_bonded + .get(delegator) + .cloned() + .unwrap_or_default() + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_del_redel_bonds_dest = + crate::delegator_redelegated_bonds_handle(delegator) + .at(dest_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds_dest, conc_del_redel_bonds_dest); + + // NOTE: Delegator redelegated unbonds are not updated by redelegation + + // Check the src total redelegated bonded + let abs_src_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + let mut 
conc_src_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(src_validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_src_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_src_total_redel_bonded, conc_src_total_redel_bonded); + + // Check the dest total redelegated bonded + let abs_dest_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let mut conc_dest_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(dest_validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_dest_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_dest_total_redel_bonded, conc_dest_total_redel_bonded); + + // Check the src validator's total redelegated unbonded + let mut abs_src_total_redel_unbonded = ref_state + .validator_total_redelegated_unbonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + abs_src_total_redel_unbonded.retain(|_, inner1| { + inner1.retain(|_, inner2| { + inner2.retain(|_, inner3| !inner3.is_empty()); + !inner2.is_empty() + }); + !inner1.is_empty() + }); + + let conc_src_total_redel_unbonded = + crate::validator_total_redelegated_unbonded_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + 
assert_eq!(abs_src_total_redel_unbonded, conc_src_total_redel_unbonded); + + // Check the src validator's outgoing redelegations + let mut abs_src_outgoing: BTreeMap< + Address, + BTreeMap>, + > = BTreeMap::new(); + ref_state + .outgoing_redelegations + .get(src_validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|(address, amounts)| { + for ((bond_start, redel_start), amount) in amounts { + abs_src_outgoing + .entry(address.clone()) + .or_default() + .entry(*bond_start) + .or_default() + .insert(*redel_start, *amount); + } + }); + let conc_src_outgoing = + crate::validator_outgoing_redelegations_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_outgoing, conc_src_outgoing); + + // Check the dest validator's incoming redelegations + let abs_dest_incoming = ref_state + .incoming_redelegations + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_dest_incoming = + crate::validator_incoming_redelegations_handle(dest_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_incoming, conc_dest_incoming); + } + + fn check_global_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + ref_state: &AbstractPosState, + ) { + for epoch in Epoch::iter_bounds_inclusive( + current_epoch, + current_epoch + params.pipeline_len, + ) { + tracing::debug!("Epoch {epoch}"); + let mut vals = HashSet::
::new(); + + // Consensus validators + for WeightedValidator { + bonded_stake, + address: validator, + } in crate::read_consensus_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + tracing::debug!( + "Consensus val {}, stake: {} ({})", + &validator, + bonded_stake.to_string_native(), + deltas_stake.to_string_native(), + ); + assert!(!deltas_stake.is_negative()); + + // Checks on stake + assert_eq!( + bonded_stake, + token::Amount::from_change(deltas_stake) + ); + assert_eq!( + bonded_stake, + ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + + // Checks on validator state + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + assert_eq!(state, Some(ValidatorState::Consensus)); assert_eq!( state.unwrap(), ref_state @@ -1012,9 +1804,12 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } + + // Below-capacity validators for WeightedValidator { bonded_stake, address: validator, @@ -1039,7 +1834,7 @@ impl ConcretePosState { token::Amount::from_change(deltas_stake) ); assert_eq!( - bonded_stake.change(), + bonded_stake, ref_state .validator_stakes .get(&epoch) @@ -1052,23 +1847,7 @@ impl ConcretePosState { let state = crate::validator_state_handle(&validator) .get(&self.s, epoch, params) .unwrap(); - if state.is_none() { - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch, params) - .unwrap() - ); - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch.next(), params) - .unwrap() - ); - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch.next(), params) - .unwrap() - ); - } + assert_eq!(state, Some(ValidatorState::BelowCapacity)); assert_eq!( state.unwrap(), @@ -1080,6 
+1859,7 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } @@ -1090,10 +1870,10 @@ impl ConcretePosState { ) .unwrap() { - let stake = validator_deltas_handle(&validator) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); + let stake = crate::read_validator_stake( + &self.s, params, &validator, epoch, + ) + .unwrap(); tracing::debug!( "Below-thresh val {}, stake {}", &validator, @@ -1126,6 +1906,7 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } @@ -1134,8 +1915,8 @@ impl ConcretePosState { let all_validators = crate::read_all_validator_addresses(&self.s, epoch).unwrap(); - for val in all_validators { - let state = validator_state_handle(&val) + for validator in all_validators { + let state = validator_state_handle(&validator) .get(&self.s, epoch, params) .unwrap() .unwrap(); @@ -1147,17 +1928,17 @@ impl ConcretePosState { .validator_states .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); - let stake = validator_deltas_handle(&val) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); + let stake = crate::read_validator_stake( + &self.s, params, &validator, epoch, + ) + .unwrap(); tracing::debug!( "Jailed val {}, stake {}", - &val, + &validator, stake.to_string_native() ); @@ -1167,7 +1948,7 @@ impl ConcretePosState { .validator_states .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); @@ -1177,11 +1958,12 @@ impl ConcretePosState { .validator_stakes .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); - assert!(!vals.contains(&val)); + + assert!(!vals.contains(&validator)); } } } @@ -1194,7 +1976,7 @@ impl ReferenceStateMachine for AbstractPosState { type Transition = Transition; fn init_state() -> BoxedStrategy { - println!("\nInitializing abstract state machine"); + tracing::debug!("\nInitializing abstract state 
machine"); arb_params_and_genesis_validators(Some(8), 8..10) .prop_map(|(params, genesis_validators)| { let epoch = Epoch::default(); @@ -1218,7 +2000,13 @@ impl ReferenceStateMachine for AbstractPosState { validator_slashes: Default::default(), enqueued_slashes: Default::default(), validator_last_slash_epochs: Default::default(), - unbond_records: Default::default(), + total_unbonded: Default::default(), + delegator_redelegated_bonded: Default::default(), + delegator_redelegated_unbonded: Default::default(), + validator_total_redelegated_bonded: Default::default(), + validator_total_redelegated_unbonded: Default::default(), + incoming_redelegations: Default::default(), + outgoing_redelegations: Default::default(), }; for GenesisValidator { @@ -1238,12 +2026,15 @@ impl ReferenceStateMachine for AbstractPosState { validator: address.clone(), }) .or_default(); - bonds.insert(epoch, token::Change::from(tokens)); + bonds.insert(epoch, tokens); + + let total_bonded = + state.total_bonded.entry(address.clone()).or_default(); + total_bonded.insert(epoch, tokens); let total_stakes = state.validator_stakes.entry(epoch).or_default(); - total_stakes - .insert(address.clone(), token::Change::from(tokens)); + total_stakes.insert(address.clone(), tokens); let consensus_set = state.consensus_set.entry(epoch).or_default(); @@ -1302,7 +2093,6 @@ impl ReferenceStateMachine for AbstractPosState { { state.copy_discrete_epoched_data(epoch) } - // dbg!(&state); state }) .boxed() @@ -1312,6 +2102,24 @@ impl ReferenceStateMachine for AbstractPosState { fn transitions(state: &Self::State) -> BoxedStrategy { // Let preconditions filter out what unbonds are not allowed let unbondable = state.bond_sums().into_iter().collect::>(); + let redelegatable = unbondable + .iter() + // Self-bonds cannot be redelegated + .filter(|(id, _)| id.source != id.validator) + .cloned() + .collect::>(); + + for (id, amt) in &redelegatable { + if *amt <= 0.into() { + tracing::debug!( + "Source: {}\nValidator: 
{}\nAmount: {}", + &id.source, + &id.validator, + amt.to_string_native() + ); + panic!("Should have no bonds with 0 amount or less!"); + } + } let withdrawable = state.withdrawable_unbonds().into_iter().collect::>(); @@ -1394,10 +2202,14 @@ impl ReferenceStateMachine for AbstractPosState { } else { let arb_unbondable = prop::sample::select(unbondable); let arb_unbond = - arb_unbondable.prop_flat_map(|(id, deltas_sum)| { - let deltas_sum = i128::try_from(deltas_sum).unwrap(); + arb_unbondable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); // Generate an amount to unbond, up to the sum - assert!(deltas_sum > 0); + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); (0..deltas_sum).prop_map(move |to_unbond| { let id = id.clone(); let amount = @@ -1409,7 +2221,7 @@ impl ReferenceStateMachine for AbstractPosState { }; // Add withdrawals, if any - if withdrawable.is_empty() { + let transitions = if withdrawable.is_empty() { transitions } else { let arb_withdrawable = prop::sample::select(withdrawable); @@ -1417,6 +2229,63 @@ impl ReferenceStateMachine for AbstractPosState { .prop_map(|(id, _)| Transition::Withdraw { id }); prop_oneof![transitions, arb_withdrawal].boxed() + }; + + // Add redelegations, if any + if redelegatable.is_empty() { + transitions + } else { + let arb_redelegatable = prop::sample::select(redelegatable); + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let epoch = state.epoch; + let params = state.params.clone(); + let incoming_redelegations = state.incoming_redelegations.clone(); + let arb_redelegation = + arb_redelegatable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); + // Generate an amount to redelegate, up to the sum + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); + let arb_amount = 
(0..deltas_sum).prop_map(|to_unbond| { + token::Amount::from_change(Change::from(to_unbond)) + }); + // Generate a new validator for redelegation + let current_validator = id.validator.clone(); + let new_validators = validators + .iter() + // The validator must be other than the current + .filter(|validator| *validator != ¤t_validator) + .cloned() + .collect::>(); + let arb_new_validator = + prop::sample::select(new_validators); + let params = params.clone(); + let incoming_redelegations = incoming_redelegations.clone(); + (arb_amount, arb_new_validator).prop_map( + move |(amount, new_validator)| Transition::Redelegate { + is_chained: Self::is_chained_redelegation( + epoch, + ¶ms, + &incoming_redelegations, + &id.source, + &id.validator, + ), + id: id.clone(), + new_validator, + amount, + }, + ) + }); + prop_oneof![transitions, arb_redelegation].boxed() } } @@ -1426,7 +2295,7 @@ impl ReferenceStateMachine for AbstractPosState { ) -> Self::State { match transition { Transition::NextEpoch => { - println!("\nABSTRACT Next Epoch"); + tracing::debug!("\nABSTRACT Next Epoch"); state.epoch = state.epoch.next(); @@ -1447,9 +2316,10 @@ impl ReferenceStateMachine for AbstractPosState { commission_rate: _, max_commission_rate_change: _, } => { - println!( + tracing::debug!( "\nABSTRACT Init Validator {} in epoch {}", - address, state.epoch + address, + state.epoch ); let pipeline: Epoch = state.pipeline(); @@ -1458,7 +2328,7 @@ impl ReferenceStateMachine for AbstractPosState { .validator_stakes .entry(pipeline) .or_default() - .insert(address.clone(), 0_i128.into()); + .insert(address.clone(), token::Amount::zero()); // Insert into the below-threshold set at pipeline since the // initial stake is 0 @@ -1476,14 +2346,13 @@ impl ReferenceStateMachine for AbstractPosState { state.debug_validators(); } Transition::Bond { id, amount } => { - println!( + tracing::debug!( "\nABSTRACT Bond {} tokens, id = {}", amount.to_string_native(), id ); - if *amount != token::Amount::default() 
{ - let change = token::Change::from(*amount); + if !amount.is_zero() { let pipeline_state = state .validator_states .get(&state.pipeline()) @@ -1493,54 +2362,95 @@ impl ReferenceStateMachine for AbstractPosState { // Validator sets need to be updated first!! if *pipeline_state != ValidatorState::Jailed { - state.update_validator_sets(&id.validator, change); + state.update_validator_sets( + state.pipeline(), + &id.validator, + amount.change(), + ); } - state.update_bond(id, change); - state.update_validator_total_stake(&id.validator, change); + state.update_bond(id, *amount); + state.update_validator_total_stake( + &id.validator, + amount.change(), + ); } state.debug_validators(); } Transition::Unbond { id, amount } => { - println!( + tracing::debug!( "\nABSTRACT Unbond {} tokens, id = {}", amount.to_string_native(), id ); - if *amount != token::Amount::default() { - let change = token::Change::from(*amount); - state.update_state_with_unbond(id, change); + // `totalBonded` + let sum_bonded = state + .bonds + .get(id) + .map(|a| { + a.iter() + .fold(token::Amount::zero(), |acc, (_, amount)| { + acc + *amount + }) + }) + .unwrap_or_default(); - // Validator sets need to be updated first!! 
- // state.update_validator_sets(&id.validator, change); - // state.update_bond(id, change); - // state.update_validator_total_stake(&id.validator, - // change); - - // let withdrawal_epoch = - // state.pipeline() + state.params.unbonding_len; - // // + 1_u64; - // let unbonds = - // state.unbonds.entry(withdrawal_epoch).or_default(); - // let unbond = unbonds.entry(id.clone()).or_default(); - // *unbond += *amount; + if !amount.is_zero() && *amount <= sum_bonded { + state.update_state_with_unbond(id, *amount); } state.debug_validators(); } Transition::Withdraw { id } => { - println!("\nABSTRACT Withdraw, id = {}", id); + tracing::debug!("\nABSTRACT Withdraw, id = {}", id); + + let redel_unbonds = state + .delegator_redelegated_unbonded + .entry(id.source.clone()) + .or_default() + .entry(id.validator.clone()) + .or_default(); // Remove all withdrawable unbonds with this bond ID - for (epoch, unbonds) in state.unbonds.iter_mut() { - if *epoch <= state.epoch { + for ((start_epoch, withdraw_epoch), unbonds) in + state.unbonds.iter_mut() + { + if *withdraw_epoch <= state.epoch { unbonds.remove(id); + redel_unbonds.remove(&(*start_epoch, *withdraw_epoch)); } } // Remove any epochs that have no unbonds left - state.unbonds.retain(|_epoch, unbonds| !unbonds.is_empty()); + state.unbonds.retain(|_epochs, unbonds| !unbonds.is_empty()); + + // Remove the redel unbonds if empty now + redel_unbonds.retain(|_epochs, unbonds| !unbonds.is_empty()); // TODO: should we do anything here for slashing? 
} + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!( + "\nABSTRACT Redelegation, id = {id}, new validator = \ + {new_validator}, amount = {}, is_chained = {is_chained}", + amount.to_string_native(), + ); + if *is_chained { + return state; + } + if !amount.is_zero() { + // Remove the amount from source validator + state.update_state_with_redelegation( + id, + new_validator, + *amount, + ); + } + state.debug_validators(); + } Transition::Misbehavior { address, slash_type, @@ -1548,10 +2458,12 @@ impl ReferenceStateMachine for AbstractPosState { height, } => { let current_epoch = state.epoch; - println!( + tracing::debug!( "\nABSTRACT Misbehavior in epoch {} by validator {}, \ found in epoch {}", - infraction_epoch, address, current_epoch + infraction_epoch, + address, + current_epoch ); let processing_epoch = *infraction_epoch @@ -1580,15 +2492,13 @@ impl ReferenceStateMachine for AbstractPosState { // Remove from the validator set starting at the next epoch and // up thru the pipeline for offset in 1..=state.params.pipeline_len { - let real_stake = token::Amount::from_change( - state - .validator_stakes - .get(&(current_epoch + offset)) - .unwrap() - .get(address) - .cloned() - .unwrap_or_default(), - ); + let real_stake = state + .validator_stakes + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(); if let Some((index, stake)) = state .is_in_consensus_w_info(address, current_epoch + offset) @@ -1719,7 +2629,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::UnjailValidator { address } => { let pipeline_epoch = state.pipeline(); - println!( + tracing::debug!( "\nABSTRACT Unjail validator {} starting in epoch {}", address.clone(), pipeline_epoch @@ -1745,9 +2655,7 @@ impl ReferenceStateMachine for AbstractPosState { sum + validators.len() as u64 }); - if pipeline_stake - < state.params.validator_stake_threshold.change() - { + if pipeline_stake < 
state.params.validator_stake_threshold { // Place into the below-threshold set let below_threshold_set_pipeline = state .below_threshold_set @@ -1768,7 +2676,7 @@ impl ReferenceStateMachine for AbstractPosState { .is_empty() ); consensus_set_pipeline - .entry(token::Amount::from_change(pipeline_stake)) + .entry(pipeline_stake) .or_default() .push_back(address.clone()); validator_states_pipeline @@ -1782,7 +2690,7 @@ impl ReferenceStateMachine for AbstractPosState { .or_default(); let min_consensus_stake = *min_consensus.key(); - if pipeline_stake > min_consensus_stake.change() { + if pipeline_stake > min_consensus_stake { // Place into the consensus set and demote the last // min_consensus validator let min_validators = min_consensus.get_mut(); @@ -1800,7 +2708,7 @@ impl ReferenceStateMachine for AbstractPosState { .insert(last_val, ValidatorState::BelowCapacity); consensus_set_pipeline - .entry(token::Amount::from_change(pipeline_stake)) + .entry(pipeline_stake) .or_default() .push_back(address.clone()); validator_states_pipeline @@ -1808,10 +2716,7 @@ impl ReferenceStateMachine for AbstractPosState { } else { // Just place into the below-capacity set below_capacity_set_pipeline - .entry( - token::Amount::from_change(pipeline_stake) - .into(), - ) + .entry(pipeline_stake.into()) .or_default() .push_back(address.clone()); validator_states_pipeline.insert( @@ -1867,7 +2772,7 @@ impl ReferenceStateMachine for AbstractPosState { let is_unbondable = state .bond_sums() .get(id) - .map(|sum| *sum >= token::Change::from(*amount)) + .map(|sum| *sum >= *amount) .unwrap_or_default(); // The validator must not be frozen currently @@ -1883,13 +2788,6 @@ impl ReferenceStateMachine for AbstractPosState { false }; - // if is_frozen { - // println!( - // "\nVALIDATOR {} IS FROZEN - CANNOT UNBOND\n", - // &id.validator - // ); - // } - // The validator must be known state.is_validator(&id.validator, pipeline) // The amount must be available to unbond and the validator not jailed @@ 
-1901,7 +2799,7 @@ impl ReferenceStateMachine for AbstractPosState { let is_withdrawable = state .withdrawable_unbonds() .get(id) - .map(|amount| *amount >= token::Amount::default()) + .map(|amount| *amount >= token::Amount::zero()) .unwrap_or_default(); // The validator must not be jailed currently @@ -1918,6 +2816,71 @@ impl ReferenceStateMachine for AbstractPosState { // The amount must be available to unbond && is_withdrawable && !is_jailed } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + let pipeline = state.pipeline(); + + if *is_chained { + Self::is_chained_redelegation( + state.epoch, + &state.params, + &state.incoming_redelegations, + &id.source, + new_validator, + ) + } else { + // The src and dest validator must be known + if !state.is_validator(&id.validator, pipeline) + || !state.is_validator(new_validator, pipeline) + { + return false; + } + + // The amount must be available to redelegate + if !state + .bond_sums() + .get(id) + .map(|sum| *sum >= *amount) + .unwrap_or_default() + { + return false; + } + + // The src validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + // The dest validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(new_validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + true + } + } Transition::Misbehavior { address, slash_type: _, @@ -1935,27 +2898,43 @@ impl ReferenceStateMachine for AbstractPosState { <= state.params.unbonding_len; // Only misbehave when there is more than 3 validators that's - // not jailed, so there's always at least one honest left + // not jailed or about to be slashed, so there's always at least + // one 
honest left let enough_honest_validators = || { - state + let num_of_honest = state .validator_states .get(&state.pipeline()) .unwrap() .iter() .filter(|(_addr, val_state)| match val_state { ValidatorState::Consensus - | ValidatorState::BelowCapacity - | ValidatorState::BelowThreshold => true, + | ValidatorState::BelowCapacity => true, ValidatorState::Inactive - | ValidatorState::Jailed => false, + | ValidatorState::Jailed + // Below threshold cannot be in consensus + | ValidatorState::BelowThreshold => false, + }) + .count(); + + // Find the number of enqueued slashes to unique validators + let num_of_enquequed_slashes = state + .enqueued_slashes + .iter() + // find all validators with any enqueued slashes + .fold(BTreeSet::new(), |mut acc, (&epoch, slashes)| { + if epoch > current_epoch { + acc.extend(slashes.keys().cloned()); + } + acc }) - .count() - > 3 + .len(); + + num_of_honest - num_of_enquequed_slashes > 3 }; // Ensure that the validator is in consensus when it misbehaves // TODO: possibly also test allowing below-capacity validators - // println!("\nVal to possibly misbehave: {}", &address); + // tracing::debug!("\nVal to possibly misbehave: {}", &address); let state_at_infraction = state .validator_states .get(infraction_epoch) @@ -2060,7 +3039,7 @@ impl AbstractPosState { } /// Update a bond with bonded or unbonded change at the pipeline epoch - fn update_bond(&mut self, id: &BondId, change: token::Change) { + fn update_bond(&mut self, id: &BondId, change: token::Amount) { let pipeline_epoch = self.pipeline(); let bonds = self.bonds.entry(id.clone()).or_default(); let bond = bonds.entry(pipeline_epoch).or_default(); @@ -2079,32 +3058,59 @@ impl AbstractPosState { *total_bonded += change; } - fn update_state_with_unbond(&mut self, id: &BondId, change: token::Change) { + fn update_state_with_unbond(&mut self, id: &BondId, change: token::Amount) { + self.unbond_tokens(id, change, false); + } + + fn unbond_tokens( + &mut self, + id: &BondId, + change: 
token::Amount, + is_redelegation: bool, + ) -> ResultSlashing { + // TODO: check in here too that the amount is less or equal to bond sum + let pipeline_epoch = self.pipeline(); let withdraw_epoch = pipeline_epoch + self.params.unbonding_len + self.params.cubic_slashing_window_length; + let bonds = self.bonds.entry(id.clone()).or_default(); - let unbond_records = self - .unbond_records + + let total_bonded = + self.total_bonded.entry(id.validator.clone()).or_default(); + let total_unbonded = self + .total_unbonded .entry(id.validator.clone()) .or_default() .entry(pipeline_epoch) .or_default(); - let unbonds = self - .unbonds - .entry(withdraw_epoch) + + let delegator_redelegated_bonds = self + .delegator_redelegated_bonded + .entry(id.source.clone()) .or_default() - .entry(id.clone()) + .entry(id.validator.clone()) + .or_default(); + let delegator_redelegated_unbonds = self + .delegator_redelegated_unbonded + .entry(id.source.clone()) + .or_default() + .entry(id.validator.clone()) .or_default(); - let validator_slashes = self - .validator_slashes - .get(&id.validator) - .cloned() - .unwrap_or_default(); - - let mut remaining = change; - let mut amount_after_slashing = token::Change::default(); + + let validator_total_redelegated_bonded = self + .validator_total_redelegated_bonded + .entry(id.validator.clone()) + .or_default(); + let validator_total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .entry(id.validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default(); + + let validator_slashes = &self.validator_slashes; tracing::debug!("Bonds before decrementing"); for (start, amnt) in bonds.iter() { @@ -2115,52 +3121,79 @@ impl AbstractPosState { ); } - for (bond_epoch, bond_amnt) in bonds.iter_mut().rev() { - tracing::debug!("remaining {}", remaining.to_string_native()); - tracing::debug!( - "Bond epoch {} - amnt {}", - bond_epoch, - bond_amnt.to_string_native() - ); - let to_unbond = cmp::min(*bond_amnt, remaining); - 
tracing::debug!( - "to_unbond (init) = {}", - to_unbond.to_string_native() - ); - *bond_amnt -= to_unbond; - *unbonds += token::Amount::from_change(to_unbond); - - let slashes_for_this_bond: BTreeMap = validator_slashes - .iter() - .cloned() - .filter(|s| *bond_epoch <= s.epoch) - .fold(BTreeMap::new(), |mut acc, s| { - let cur = acc.entry(s.epoch).or_default(); - *cur += s.rate; - acc - }); - tracing::debug!( - "Slashes for this bond{:?}", - slashes_for_this_bond.clone() - ); - amount_after_slashing += compute_amount_after_slashing( - &slashes_for_this_bond, - token::Amount::from_change(to_unbond), - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ) - .change(); - tracing::debug!( - "Cur amnt after slashing = {}", - &amount_after_slashing.to_string_native() - ); + // `resultUnbonding` + // Get the bonds for removal + let bonds_to_remove = Self::find_bonds_to_remove(bonds, change); + + // `modifiedRedelegation` + // Modified redelegation + // The unbond may need to partially unbond redelegated tokens, so + // compute if necessary + let modified_redelegation = match bonds_to_remove.new_entry { + Some((bond_epoch, new_bond_amount)) => { + if delegator_redelegated_bonds.contains_key(&bond_epoch) { + let cur_bond_amount = + bonds.get(&bond_epoch).cloned().unwrap_or_default(); + Self::compute_modified_redelegation( + delegator_redelegated_bonds, + bond_epoch, + cur_bond_amount - new_bond_amount, + ) + } else { + ModifiedRedelegation::default() + } + } + None => ModifiedRedelegation::default(), + }; - let amt = unbond_records.entry(*bond_epoch).or_default(); - *amt += token::Amount::from_change(to_unbond); + // `keysUnbonds` + // New unbonds. 
This will be needed for a couple things + let unbonded_bond_starts = + if let Some((start_epoch, _)) = bonds_to_remove.new_entry { + let mut to_remove = bonds_to_remove.epochs.clone(); + to_remove.insert(start_epoch); + to_remove + } else { + bonds_to_remove.epochs.clone() + }; + // `newUnbonds` + let new_unbonds = unbonded_bond_starts + .into_iter() + .map(|start| { + let cur_bond_amnt = bonds.get(&start).cloned().unwrap(); + let new_value = if let Some((start_epoch, new_bond_amount)) = + bonds_to_remove.new_entry + { + if start_epoch == start { + cur_bond_amnt - new_bond_amount + } else { + cur_bond_amnt + } + } else { + cur_bond_amnt + }; + ((start, withdraw_epoch), new_value) + }) + .collect::>(); - remaining -= to_unbond; - if remaining.is_zero() { - break; + // Update the bonds and unbonds in the AbstractState + // `updatedBonded` + updates to `updatedDelegator` + for bond_epoch in &bonds_to_remove.epochs { + bonds.remove(bond_epoch); + } + if let Some((bond_epoch, new_bond_amt)) = bonds_to_remove.new_entry { + bonds.insert(bond_epoch, new_bond_amt); + } + // `updatedUnbonded` + updates to `updatedDelegator` + if !is_redelegation { + for (epoch_pair, amount) in &new_unbonds { + let unbonds = self + .unbonds + .entry(*epoch_pair) + .or_default() + .entry(id.clone()) + .or_default(); + *unbonds += *amount; } } @@ -2173,27 +3206,323 @@ impl AbstractPosState { ); } + // `newRedelegatedUnbonds` + // Compute new redelegated unbonds (which requires unmodified + // redelegated bonds) + let new_redelegated_unbonds = Self::compute_new_redelegated_unbonds( + delegator_redelegated_bonds, + &bonds_to_remove.epochs, + &modified_redelegation, + ); + + // `updatedRedelegatedBonded` + // Update the delegator's redelegated bonds in the state + for epoch_to_remove in &bonds_to_remove.epochs { + delegator_redelegated_bonds.remove(epoch_to_remove); + } + if let Some(epoch) = modified_redelegation.epoch { + if modified_redelegation.validators_to_remove.is_empty() { + 
delegator_redelegated_bonds.remove(&epoch); + } else { + let rbonds = + delegator_redelegated_bonds.entry(epoch).or_default(); + + if let Some(val_to_modify) = + &modified_redelegation.validator_to_modify + { + let mut updated_vals_to_remove = + modified_redelegation.validators_to_remove.clone(); + updated_vals_to_remove.remove(val_to_modify); + + // Remove the updated_vals_to_remove keys from the + // redelegated_bonds map first + for val in &updated_vals_to_remove { + rbonds.remove(val); + } + + if let Some(epoch_to_modify) = + modified_redelegation.epoch_to_modify + { + let mut updated_epochs_to_remove = + modified_redelegation.epochs_to_remove.clone(); + updated_epochs_to_remove.remove(&epoch_to_modify); + let val_bonds_to_modify = + rbonds.entry(val_to_modify.clone()).or_default(); + for epoch in updated_epochs_to_remove { + val_bonds_to_modify.remove(&epoch); + } + val_bonds_to_modify.insert( + epoch_to_modify, + modified_redelegation.new_amount.unwrap(), + ); + } else { + // Then remove to epochs_to_remove from the redelegated + // bonds of the val_to_modify + let val_bonds_to_modify = + rbonds.entry(val_to_modify.clone()).or_default(); + for epoch in &modified_redelegation.epochs_to_remove { + val_bonds_to_modify.remove(epoch); + } + } + } else { + // Remove all validators in + // modified_redelegation.validators_to_remove + // from redelegated_bonds + for val in &modified_redelegation.validators_to_remove { + rbonds.remove(val); + } + } + } + } + + // `updatedRedelegatedUnbonded + if !is_redelegation { + // Get all the epoch pairs that should exist in the state now + let new_unbond_epoch_pairs = new_redelegated_unbonds + .keys() + .map(|start_epoch| (*start_epoch, withdraw_epoch)) + .collect::>(); + + // Update the state for delegator's redelegated unbonds now + // NOTE: can maybe do this by only looking at those inside the new + // epoch pairs? 
+ for unbond_pair in new_unbond_epoch_pairs { + for (src_val, redel_unbonds) in + new_redelegated_unbonds.get(&unbond_pair.0).unwrap() + { + for (src_start, unbonded) in redel_unbonds { + let existing_unbonded = delegator_redelegated_unbonds + .entry(unbond_pair) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*src_start) + .or_default(); + *existing_unbonded += *unbonded; + } + } + } + } + + // `updatedTotalBonded` and `updatedTotalUnbonded` + // Update the validator's total bonded and total unbonded + for ((start_epoch, _), unbonded) in &new_unbonds { + let cur_total_bonded = + total_bonded.entry(*start_epoch).or_default(); + *cur_total_bonded -= *unbonded; + let cur_total_unbonded = + total_unbonded.entry(*start_epoch).or_default(); + *cur_total_unbonded += *unbonded; + } + + // `updatedTotalRedelegatedBonded` and `updatedTotalRedelegatedUnbonded` + // Update the validator's total redelegated bonded and unbonded + for (dest_start, r_unbonds) in &new_redelegated_unbonds { + for (src_val, changes) in r_unbonds { + for (bond_start, change) in changes { + let cur_total_bonded = validator_total_redelegated_bonded + .entry(*dest_start) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + *cur_total_bonded -= *change; + + let cur_total_unbonded = + validator_total_redelegated_unbonded + .entry(*dest_start) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + *cur_total_unbonded += *change; + } + } + } + + // `resultSlashing` + // Get the slashed amount of the unbond now + let result_slashing = Self::compute_amount_after_slashing_unbond( + &self.params, + validator_slashes, + &id.validator, + &new_unbonds, + &new_redelegated_unbonds, + ); + // `amountAfterSlashing` + let amount_after_slashing = result_slashing.sum.change(); + let pipeline_state = self .validator_states .get(&self.pipeline()) .unwrap() .get(&id.validator) .unwrap(); - // let pipeline_stake = 
self - // .validator_stakes - // .get(&self.pipeline()) - // .unwrap() - // .get(&id.validator) - // .unwrap(); - // let token_change = cmp::min(*pipeline_stake, amount_after_slashing); if *pipeline_state != ValidatorState::Jailed { - self.update_validator_sets(&id.validator, -amount_after_slashing); + self.update_validator_sets( + self.pipeline(), + &id.validator, + -amount_after_slashing, + ); } self.update_validator_total_stake( &id.validator, -amount_after_slashing, ); + + result_slashing + } + + fn update_state_with_redelegation( + &mut self, + id: &BondId, + new_validator: &Address, + change: token::Amount, + ) { + // First need to unbond the redelegated tokens + // NOTE: same logic as unbond transition but with some things left out + let pipeline_epoch = self.pipeline(); + + // `resultUnbond` + let result_unbond = self.unbond_tokens(id, change, true); + + // `amountAfterSlashing` + let amount_after_slashing = result_unbond.sum; + + // `updatedRedelegatedBonds` + // Update the delegator's redelegated bonded + let delegator_redelegated_bonded = self + .delegator_redelegated_bonded + .entry(id.source.clone()) + .or_default() + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default() + .entry(id.validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + *delegator_redelegated_bonded + .entry(*start_epoch) + .or_default() += *bonded; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = self + .bonds + .get(&BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .cloned() + .unwrap_or_default(); + tracing::debug!( + "\nRedeleg dest bonds before incrementing: {bonds:#?}" + ); + } + + if !amount_after_slashing.is_zero() { + // `updatedDelegator` --> `with("bonded")` + // Update the delegator's bonds + let bonds = self + .bonds + .entry(BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .or_default(); + 
*bonds.entry(pipeline_epoch).or_default() += amount_after_slashing; + + // `updatedDestValidator` --> `with("totalBonded")` + // Update the dest validator's total bonded + let dest_total_bonded = self + .total_bonded + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default(); + *dest_total_bonded += amount_after_slashing; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = self + .bonds + .get(&BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .cloned() + .unwrap_or_default(); + tracing::debug!( + "\nRedeleg dest bonds after incrementing: {bonds:#?}" + ); + } + + // `updatedOutgoingRedelegations` and `updatedSrcValidator` + // Update the src validator's outgoing redelegations + let outgoing_redelegations = self + .outgoing_redelegations + .entry(id.validator.clone()) + .or_default() + .entry(new_validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + let cur_outgoing = outgoing_redelegations + .entry((*start_epoch, self.epoch)) + .or_default(); + *cur_outgoing += *bonded; + } + + // `updatedDestValidator` --> `with("totalRedelegatedBonded")` + // Update the dest validator's total redelegated bonded + let dest_total_redelegated_bonded = self + .validator_total_redelegated_bonded + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default() + .entry(id.validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + let cur_tot_bonded = dest_total_redelegated_bonded + .entry(*start_epoch) + .or_default(); + *cur_tot_bonded += *bonded; + } + + // `updatedDestValidator` --> `with("incomingRedelegations")` + // Update the dest validator's incoming redelegations + let incoming_redelegations = self + .incoming_redelegations + .entry(new_validator.clone()) + .or_default(); + incoming_redelegations.insert(id.source.clone(), pipeline_epoch); + + // `updatedDestValidator` --> `with("stake")` + // 
Update validator set and stake + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(new_validator) + .unwrap(); + + if !amount_after_slashing.is_zero() { + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + self.pipeline(), + new_validator, + amount_after_slashing.change(), + ); + } + self.update_validator_total_stake( + new_validator, + amount_after_slashing.change(), + ); + } } /// Update validator's total stake with bonded or unbonded change at the @@ -2209,32 +3538,39 @@ impl AbstractPosState { .or_default() .entry(validator.clone()) .or_default(); - *total_stakes += change; + *total_stakes = token::Amount::from(total_stakes.change() + change); } /// Update validator in sets with bonded or unbonded change fn update_validator_sets( &mut self, + epoch: Epoch, validator: &Address, change: token::Change, ) { - let pipeline = self.pipeline(); - let consensus_set = self.consensus_set.entry(pipeline).or_default(); - let below_cap_set = - self.below_capacity_set.entry(pipeline).or_default(); + tracing::debug!( + "\nUpdating set for validator {} in epoch {} with amount {}\n", + validator, + epoch, + change + ); + if change.is_zero() { + return; + } + // let pipeline = self.pipeline(); + let consensus_set = self.consensus_set.entry(epoch).or_default(); + let below_cap_set = self.below_capacity_set.entry(epoch).or_default(); let below_thresh_set = - self.below_threshold_set.entry(pipeline).or_default(); + self.below_threshold_set.entry(epoch).or_default(); - let validator_stakes = self.validator_stakes.get(&pipeline).unwrap(); - let validator_states = - self.validator_states.get_mut(&pipeline).unwrap(); + let validator_stakes = self.validator_stakes.get(&epoch).unwrap(); + let validator_states = self.validator_states.get_mut(&epoch).unwrap(); let state_pre = validator_states.get(validator).unwrap(); let this_val_stake_pre = *validator_stakes.get(validator).unwrap(); let this_val_stake_post = - 
token::Amount::from_change(this_val_stake_pre + change); - let this_val_stake_pre = token::Amount::from_change(this_val_stake_pre); + token::Amount::from_change(this_val_stake_pre.change() + change); let threshold = self.params.validator_stake_threshold; if this_val_stake_pre < threshold && this_val_stake_post < threshold { @@ -2246,12 +3582,9 @@ impl AbstractPosState { match state_pre { ValidatorState::Consensus => { - // println!("Validator initially in consensus"); // Remove from the prior stake let vals = consensus_set.entry(this_val_stake_pre).or_default(); - // dbg!(&vals); vals.retain(|addr| addr != validator); - // dbg!(&vals); if vals.is_empty() { consensus_set.remove(&this_val_stake_pre); @@ -2290,7 +3623,7 @@ impl AbstractPosState { // If unbonding, check the max below-cap validator's state if we // need to do a swap - if change < token::Change::default() { + if change < token::Change::zero() { if let Some(mut max_below_cap) = below_cap_set.last_entry() { let max_below_cap_stake = *max_below_cap.key(); @@ -2333,7 +3666,7 @@ impl AbstractPosState { .push_back(validator.clone()); } ValidatorState::BelowCapacity => { - // println!("Validator initially in below-cap"); + // tracing::debug!("Validator initially in below-cap"); // Remove from the prior stake let vals = @@ -2356,11 +3689,9 @@ impl AbstractPosState { // If bonding, check the min consensus validator's state if we // need to do a swap - if change >= token::Change::default() { - // dbg!(&consensus_set); + if change >= token::Change::zero() { if let Some(mut min_consensus) = consensus_set.first_entry() { - // dbg!(&min_consensus); let min_consensus_stake = *min_consensus.key(); if this_val_stake_post > min_consensus_stake { // Swap this validator with the max consensus @@ -2423,7 +3754,6 @@ impl AbstractPosState { } // Determine which set to place the validator into if let Some(mut min_consensus) = consensus_set.first_entry() { - // dbg!(&min_consensus); let min_consensus_stake = 
*min_consensus.key(); if this_val_stake_post > min_consensus_stake { // Swap this validator with the max consensus @@ -2478,288 +3808,632 @@ impl AbstractPosState { .get(&self.epoch) .cloned() .unwrap_or_default(); - if !slashes_this_epoch.is_empty() { - let infraction_epoch = self.epoch - - self.params.unbonding_len - - self.params.cubic_slashing_window_length - - 1; - // Now need to basically do the end_of_epoch() procedure - // from the Informal Systems model - let cubic_rate = self.cubic_slash_rate(); - for (validator, slashes) in slashes_this_epoch { - let stake_at_infraction = self - .validator_stakes - .get(&infraction_epoch) - .unwrap() - .get(&validator) - .cloned() - .unwrap_or_default(); - tracing::debug!( - "Val {} stake at infraction {}", + + if slashes_this_epoch.is_empty() { + return; + } + + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + let cubic_rate = self.cubic_slash_rate(); + + // Get effective slash rate per validator and update the slashes in the + // Abstract state + let slash_rates = slashes_this_epoch.iter().fold( + BTreeMap::::new(), + |mut acc, (validator, slashes)| { + let mut tot_rate = + acc.get(validator).cloned().unwrap_or_default(); + for slash in slashes { + debug_assert_eq!(slash.epoch, infraction_epoch); + let rate = cmp::max( + slash.r#type.get_slash_rate(&self.params), + cubic_rate, + ); + tot_rate = cmp::min(Dec::one(), tot_rate + rate); + } + acc.insert(validator.clone(), tot_rate); + acc + }, + ); + + let mut map_validator_slash: EagerRedelegatedBondsMap = BTreeMap::new(); + for (validator, rate) in slash_rates { + self.process_validator_slash( + &validator, + rate, + &mut map_validator_slash, + ); + } + tracing::debug!( + "Slashed amounts for validators: {map_validator_slash:#?}" + ); + + for (validator, slash_amounts) in map_validator_slash { + for (update_epoch, delta) in slash_amounts { + let state = self + .validator_states + .get(&update_epoch) + .unwrap() + .get(&validator) + 
.unwrap(); + if *state != ValidatorState::Jailed { + self.update_validator_sets( + update_epoch, + &validator, + -delta.change(), + ); + } + + let stake = self + .validator_stakes + .entry(update_epoch) + .or_default() + .entry(validator.clone()) + .or_default(); + *stake -= delta; + } + + let next_state = self + .validator_states + .get(&self.epoch.next()) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + + debug_assert_eq!(next_state, pipeline_state); + } + + // Update the slashes in the Abstract state ONLY AFTER processing them + for (validator, slashes) in slashes_this_epoch { + let cur_slashes = + self.validator_slashes.entry(validator.clone()).or_default(); + + for slash in slashes { + let rate = cmp::max( + slash.r#type.get_slash_rate(&self.params), + cubic_rate, + ); + cur_slashes.push(Slash { + epoch: slash.epoch, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate, + }); + } + } + } + + fn process_validator_slash( + &mut self, + validator: &Address, + slash_rate: Dec, + val_slash_amounts: &mut EagerRedelegatedBondsMap, + ) { + let slash_amounts = val_slash_amounts + .get(validator) + .cloned() + .unwrap_or_default(); + let result_slash = + self.slash_validator(validator, slash_rate, &slash_amounts); + + // `updatedSlashedAmountMap` + let validator_slashes = + val_slash_amounts.entry(validator.clone()).or_default(); + for (epoch, slash) in result_slash { + *validator_slashes.entry(epoch).or_default() += slash; + } + + let dest_validators = self + .outgoing_redelegations + .get(validator) + .cloned() + .unwrap_or_default() + .keys() + .cloned() + .collect::>(); + + for dest_val in dest_validators { + let to_modify = + val_slash_amounts.entry(dest_val.clone()).or_default(); + + tracing::debug!( + "Slashing {} redelegation to {}", + validator, + &dest_val + ); + + // `slashValidatorRedelegation` 
+ self.slash_validator_redelegation( + validator, &dest_val, slash_rate, to_modify, + ); + + if to_modify.is_empty() { + val_slash_amounts.remove(&dest_val); + }; + } + } + + fn slash_validator( + &self, + validator: &Address, + slash_rate: Dec, + val_slash_amounts: &BTreeMap, + ) -> BTreeMap { + tracing::debug!( + "Slashing validator {} at rate {}", + validator, + slash_rate + ); + + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + + let total_unbonded = self + .total_unbonded + .get(validator) + .cloned() + .unwrap_or_default(); + let total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .get(validator) + .cloned() + .unwrap_or_default(); + + // `val bonds` + let mut total_bonded = self + .total_bonded + .get(validator) + .cloned() + .unwrap_or_default() + .into_iter() + .filter(|&(epoch, _amount)| epoch <= infraction_epoch) + .collect::>(); + + // `val redelegatedBonds` + let mut total_redelegated_bonded = total_bonded + .keys() + .filter(|&epoch| { + self.validator_total_redelegated_bonded + .get(validator) + .cloned() + .unwrap_or_default() + .contains_key(epoch) + }) + .map(|epoch| { + ( + *epoch, + self.validator_total_redelegated_bonded + .get(validator) + .unwrap() + .get(epoch) + .cloned() + .unwrap(), + ) + }) + .collect::>(); + + let mut slashed_amounts = val_slash_amounts.clone(); + let mut sum = token::Amount::zero(); + + let eps = self + .epoch + .iter_range(self.params.pipeline_len) + .collect::>(); + for epoch in eps.into_iter().rev() { + let amount = total_bonded.iter().fold( + token::Amount::zero(), + |acc, (bond_start, bond_amount)| { + let redel_bonds = total_redelegated_bonded + .get(bond_start) + .cloned() + .unwrap_or_default(); + acc + self.compute_slash_bond_at_epoch( + epoch, + infraction_epoch, + *bond_start, + *bond_amount, + &redel_bonds, + slash_rate, + validator, + ) + }, + ); + + let new_bonds = total_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default() + .into_iter() + 
.filter(|(ep, _)| *ep <= infraction_epoch) + .collect::>(); + + let new_redelegated_bonds = new_bonds + .keys() + .filter(|&ep| { + total_redelegated_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default() + .contains_key(ep) + }) + .map(|ep| { + ( + *ep, + total_redelegated_unbonded + .get(&epoch) + .unwrap() + .get(ep) + .cloned() + .unwrap(), + ) + }) + .collect::>(); + + total_bonded = new_bonds; + total_redelegated_bonded = new_redelegated_bonds; + sum += amount; + + let cur = slashed_amounts.entry(epoch).or_default(); + *cur += sum; + } + // Hack - should this be done differently? (think this is safe) + let last_amt = slashed_amounts + .get(&self.pipeline().prev()) + .cloned() + .unwrap(); + slashed_amounts.insert(self.pipeline(), last_amt); + + slashed_amounts + } + + fn fold_and_slash_redelegated_bonds( + &self, + redel_bonds: &BTreeMap>, + start: Epoch, + list_slashes: &[Slash], + slash_epoch_filter: impl Fn(Epoch) -> bool, + ) -> FoldRedelegatedBondsResult { + let mut result = FoldRedelegatedBondsResult::default(); + for (src_validator, bonds) in redel_bonds { + for (bond_start, bonded) in bonds { + let src_slashes = self + .validator_slashes + .get(src_validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&s| { + self.params.in_redelegation_slashing_window( + s.epoch, + self.params + .redelegation_start_epoch_from_end(start), + start, + ) && *bond_start <= s.epoch + && slash_epoch_filter(s.epoch) + }) + .cloned() + .collect::>(); + + let mut merged = list_slashes + .iter() + .chain(src_slashes.iter()) + .cloned() + .collect::>(); + merged + .sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); + + result.total_redelegated += *bonded; + result.total_after_slashing += Self::apply_slashes_to_amount( + &self.params, + &merged, + *bonded, + ); + } + } + result + } + + fn compute_bond_at_epoch( + &self, + epoch: Epoch, + start: Epoch, + amount: token::Amount, + redel_bonds: &BTreeMap>, + validator: &Address, + ) -> token::Amount { + // 
`val list_slashes` + let list_slashes = self + .validator_slashes + .get(validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&slash| { + // TODO: check bounds! + start <= slash.epoch + && slash.epoch + self.params.slash_processing_epoch_offset() + <= epoch + }) + .cloned() + .collect::>(); + + // `val filteredSlashMap` and `val resultFold` + // `fold_and_slash_redelegated_bonds` + let slash_epoch_filter = + |e: Epoch| e + self.params.slash_processing_epoch_offset() <= epoch; + let result_fold = self.fold_and_slash_redelegated_bonds( + redel_bonds, + start, + &list_slashes, + slash_epoch_filter, + ); + + // `val totalNoRedelegated` + let total_not_redelegated = amount - result_fold.total_redelegated; + // `val afterNoRedelegated` + let after_not_redelegated = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + total_not_redelegated, + ); + + after_not_redelegated + result_fold.total_after_slashing + } + + #[allow(clippy::too_many_arguments)] + fn compute_slash_bond_at_epoch( + &self, + epoch: Epoch, + infraction_epoch: Epoch, + bond_start: Epoch, + bond_amount: token::Amount, + redel_bonds: &BTreeMap>, + slash_rate: Dec, + validator: &Address, + ) -> token::Amount { + let amount_due = self + .compute_bond_at_epoch( + infraction_epoch, + bond_start, + bond_amount, + redel_bonds, + validator, + ) + .mul_ceil(slash_rate); + let slashable_amount = self.compute_bond_at_epoch( + epoch, + bond_start, + bond_amount, + redel_bonds, + validator, + ); + + cmp::min(amount_due, slashable_amount) + } + + fn slash_validator_redelegation( + &self, + validator: &Address, + dest_validator: &Address, + slash_rate: Dec, + slash_amounts: &mut BTreeMap, + ) { + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + + let dest_total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let validator_slashes = self + .validator_slashes + .get(validator) + 
.cloned() + .unwrap_or_default(); + + // Loop over outgoing redelegations of validator -> dest_validator + let outgoing_redelegations = if let Some(outgoing_redels) = + self.outgoing_redelegations.get(validator) + { + outgoing_redels + .get(dest_validator) + .cloned() + .unwrap_or_default() + } else { + BTreeMap::<(Epoch, Epoch), token::Amount>::new() + }; + + for ((src_start_epoch, redel_start), amount) in outgoing_redelegations { + if self.params.in_redelegation_slashing_window( + infraction_epoch, + redel_start, + self.params.redelegation_end_epoch_from_start(redel_start), + ) && src_start_epoch <= infraction_epoch + { + self.slash_redelegation( + amount, + src_start_epoch, + self.params.redelegation_end_epoch_from_start(redel_start), validator, - stake_at_infraction.to_string_native(), + slash_rate, + &validator_slashes, + &dest_total_redelegated_unbonded, + slash_amounts, ); + } + } + } - let mut total_rate = Dec::zero(); + #[allow(clippy::too_many_arguments)] + fn slash_redelegation( + &self, + amount: token::Amount, + bond_start: Epoch, + redel_bond_start: Epoch, + src_validator: &Address, + slash_rate: Dec, + slashes: &[Slash], + dest_total_redelegated_unbonded: &AbstractTotalRedelegatedUnbonded, + slash_amounts: &mut BTreeMap, + ) { + tracing::debug!( + "\nSlashing redelegation amount {} - bond start {} and \ + redel_bond_start {} - at rate {}\n", + amount.to_string_native(), + bond_start, + redel_bond_start, + slash_rate + ); - for slash in slashes { - debug_assert_eq!(slash.epoch, infraction_epoch); - let rate = cmp::max( - slash.r#type.get_slash_rate(&self.params), - cubic_rate, - ); - let processed_slash = Slash { - epoch: slash.epoch, - block_height: slash.block_height, - r#type: slash.r#type, - rate, - }; - let cur_slashes = self - .validator_slashes - .entry(validator.clone()) - .or_default(); - cur_slashes.push(processed_slash.clone()); + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); - total_rate += rate; - } - 
total_rate = cmp::min(total_rate, Dec::one()); - tracing::debug!("Total rate: {}", total_rate); - - let mut total_unbonded = token::Amount::default(); - let mut sum_post_bonds = token::Change::default(); - - for epoch in (infraction_epoch.0 + 1)..self.epoch.0 { - tracing::debug!("\nEpoch {}", epoch); - let mut recent_unbonds = token::Change::default(); - let unbond_records = self - .unbond_records - .entry(validator.clone()) - .or_default() - .get(&Epoch(epoch)) - .cloned() - .unwrap_or_default(); - for (start, unbond_amount) in unbond_records { - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - &unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let slashes_for_this_unbond = self - .validator_slashes - .get(&validator) - .cloned() - .unwrap_or_default() - .iter() - .filter(|&s| { - start <= s.epoch - && s.epoch - + self.params.unbonding_len - + self - .params - .cubic_slashing_window_length - < infraction_epoch - }) - .cloned() - .fold( - BTreeMap::::new(), - |mut acc, s| { - let cur = - acc.entry(s.epoch).or_default(); - *cur += s.rate; - acc - }, - ); - tracing::debug!( - "Slashes for this unbond: {:?}", - slashes_for_this_unbond - ); - total_unbonded += compute_amount_after_slashing( - &slashes_for_this_unbond, - unbond_amount, - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ); - } else { - recent_unbonds += unbond_amount.change(); - } + // Slash redelegation destination validator from the next epoch only + // as they won't be jailed + let set_update_epoch = self.epoch.next(); - tracing::debug!( - "Total unbonded (epoch {}) w slashing = {}", - epoch, - total_unbonded.to_string_native() - ); - } - sum_post_bonds += self - .total_bonded - .get(&validator) - .and_then(|bonded| bonded.get(&Epoch(epoch))) + // Do initial computation of total unbonded + let mut tot_unbonded = token::Amount::zero(); + for epoch in Epoch::iter_bounds_inclusive( + infraction_epoch.next(), + set_update_epoch, + ) 
{ + let total_redelegated_unbonded = + dest_total_redelegated_unbonded.get(&epoch); + if let Some(tot_redel_unbonded) = total_redelegated_unbonded { + if Self::has_redelegation( + tot_redel_unbonded, + bond_start, + redel_bond_start, + src_validator, + ) { + tot_unbonded += tot_redel_unbonded + .get(&redel_bond_start) + .unwrap() + .get(src_validator) + .unwrap() + .get(&bond_start) .cloned() - .unwrap_or_default() - - recent_unbonds; + .unwrap(); } - tracing::debug!("Computing adjusted amounts now"); + } + } - let mut last_slash = token::Change::default(); - for offset in 0..self.params.pipeline_len { - tracing::debug!( - "Epoch {}\nLast slash = {}", - self.epoch + offset, - last_slash.to_string_native(), - ); - let mut recent_unbonds = token::Change::default(); - let unbond_records = self - .unbond_records - .get(&validator) + for epoch in + Epoch::iter_range(set_update_epoch, self.params.pipeline_len) + { + let total_redelegated_unbonded = dest_total_redelegated_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default(); + let updated_total_unbonded = if !Self::has_redelegation( + &total_redelegated_unbonded, + bond_start, + redel_bond_start, + src_validator, + ) { + tot_unbonded + } else { + tot_unbonded + + total_redelegated_unbonded + .get(&redel_bond_start) .unwrap() - .get(&(self.epoch + offset)) + .get(src_validator) + .unwrap() + .get(&bond_start) .cloned() - .unwrap_or_default(); - for (start, unbond_amount) in unbond_records { - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let slashes_for_this_unbond = self - .validator_slashes - .get(&validator) - .cloned() - .unwrap_or_default() - .iter() - .filter(|&s| { - start <= s.epoch - && s.epoch - + self.params.unbonding_len - + self - .params - .cubic_slashing_window_length - < infraction_epoch - }) - .cloned() - .fold( - BTreeMap::::new(), - |mut acc, s| { - let cur = - acc.entry(s.epoch).or_default(); - 
*cur += s.rate; - acc - }, - ); - tracing::debug!( - "Slashes for this unbond: {:?}", - slashes_for_this_unbond - ); + .unwrap() + }; - total_unbonded += compute_amount_after_slashing( - &slashes_for_this_unbond, - unbond_amount, - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ); - } else { - recent_unbonds += unbond_amount.change(); - } + let list_slashes = slashes + .iter() + .filter(|&slash| { + self.params.in_redelegation_slashing_window( + slash.epoch, + self.params.redelegation_start_epoch_from_end( + redel_bond_start, + ), + redel_bond_start, + ) && bond_start <= slash.epoch + && slash.epoch + + self.params.slash_processing_epoch_offset() + <= infraction_epoch + }) + .cloned() + .collect::>(); - tracing::debug!( - "Total unbonded (offset {}) w slashing = {}", - offset, - total_unbonded.to_string_native() - ); - } - tracing::debug!( - "stake at infraction {}", - stake_at_infraction.to_string_native(), - ); - tracing::debug!( - "total unbonded {}", - total_unbonded.to_string_native() - ); - let this_slash = total_rate - * (stake_at_infraction - total_unbonded.change()); - let diff_slashed_amount = last_slash - this_slash; - tracing::debug!( - "Offset {} diff_slashed_amount {}", - offset, - diff_slashed_amount.to_string_native(), - ); - last_slash = this_slash; - // total_unbonded = token::Amount::default(); - - // Update the voting powers (consider that the stake is - // discrete) let validator_stake = self - // .validator_stakes - // .entry(self.epoch + offset) - // .or_default() - // .entry(validator.clone()) - // .or_default(); - // *validator_stake -= diff_slashed_amount; - - tracing::debug!("Updating ABSTRACT voting powers"); - sum_post_bonds += self - .total_bonded - .get(&validator) - .and_then(|bonded| bonded.get(&(self.epoch + offset))) - .cloned() - .unwrap_or_default() - - recent_unbonds; + let slashable_amount = amount + .checked_sub(updated_total_unbonded) + .unwrap_or_default(); - tracing::debug!( - "\nUnslashable bonds 
= {}", - sum_post_bonds.to_string_native() - ); - let validator_stake_at_offset = self - .validator_stakes - .entry(self.epoch + offset) - .or_default() - .entry(validator.clone()) - .or_default(); + let slashed = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + slashable_amount, + ) + .mul_ceil(slash_rate); - let slashable_stake_at_offset = - *validator_stake_at_offset - sum_post_bonds; - tracing::debug!( - "Val stake pre (epoch {}) = {}", - self.epoch + offset, - validator_stake_at_offset.to_string_native(), - ); - tracing::debug!( - "Slashable stake at offset = {}", - slashable_stake_at_offset.to_string_native(), - ); - let change = cmp::max( - -slashable_stake_at_offset, - diff_slashed_amount, - ); + let list_slashes = slashes + .iter() + .filter(|&slash| { + self.params.in_redelegation_slashing_window( + slash.epoch, + self.params.redelegation_start_epoch_from_end( + redel_bond_start, + ), + redel_bond_start, + ) && bond_start <= slash.epoch + }) + .cloned() + .collect::>(); - tracing::debug!("Change = {}", change.to_string_native()); - *validator_stake_at_offset += change; + let slashable_stake = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + slashable_amount, + ) + .mul_ceil(slash_rate); - for os in (offset + 1)..=self.params.pipeline_len { - tracing::debug!("Adjust epoch {}", self.epoch + os); - let offset_stake = self - .validator_stakes - .entry(self.epoch + os) - .or_default() - .entry(validator.clone()) - .or_default(); - *offset_stake += change; - // let mut new_stake = - // *validator_stake - diff_slashed_amount; - // if new_stake < 0_i128 { - // new_stake = 0_i128; - // } - - // *validator_stake = new_stake; - tracing::debug!( - "New stake at epoch {} = {}", - self.epoch + os, - offset_stake.to_string_native() - ); - } + tot_unbonded = updated_total_unbonded; + + let to_slash = cmp::min(slashed, slashable_stake); + if !to_slash.is_zero() { + let slashed_amt = slash_amounts.entry(epoch).or_default(); + *slashed_amt 
+= to_slash; + } + } + } + + fn has_redelegation( + total_redelegated_unbonded: &BTreeMap< + Epoch, + BTreeMap>, + >, + bond_start: Epoch, + redel_start: Epoch, + src_validator: &Address, + ) -> bool { + if let Some(redel_unbonded) = + total_redelegated_unbonded.get(&redel_start) + { + if let Some(unbonded) = redel_unbonded.get(src_validator) { + if unbonded.contains_key(&bond_start) { + return true; } } } + false } /// Get the pipeline epoch @@ -2826,9 +4500,9 @@ impl AbstractPosState { } /// Find the sums of the bonds across all epochs - fn bond_sums(&self) -> BTreeMap { + fn bond_sums(&self) -> BTreeMap { self.bonds.iter().fold( - BTreeMap::::new(), + BTreeMap::::new(), |mut acc, (id, bonds)| { for delta in bonds.values() { let entry = acc.entry(id.clone()).or_default(); @@ -2843,10 +4517,10 @@ impl AbstractPosState { fn withdrawable_unbonds(&self) -> BTreeMap { self.unbonds.iter().fold( BTreeMap::::new(), - |mut acc, (epoch, unbonds)| { - if *epoch <= self.epoch { + |mut acc, ((_start_epoch, withdraw_epoch), unbonds)| { + if *withdraw_epoch <= self.epoch { for (id, amount) in unbonds { - if *amount > token::Amount::default() { + if *amount > token::Amount::zero() { *acc.entry(id.clone()).or_default() += *amount; } } @@ -2858,11 +4532,13 @@ impl AbstractPosState { /// Compute the cubic slashing rate for the current epoch fn cubic_slash_rate(&self) -> Dec { - let infraction_epoch = self.epoch - - self.params.unbonding_len - - 1_u64 - - self.params.cubic_slashing_window_length; - tracing::debug!("Infraction epoch: {}", infraction_epoch); + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + tracing::debug!( + "Infraction epoch: {}, Current epoch: {}", + infraction_epoch, + self.epoch + ); let window_width = self.params.cubic_slashing_window_length; let epoch_start = Epoch::from( infraction_epoch @@ -2877,7 +4553,7 @@ impl AbstractPosState { for epoch in Epoch::iter_bounds_inclusive(epoch_start, epoch_end) { let consensus_stake = 
self.consensus_set.get(&epoch).unwrap().iter().fold( - token::Amount::default(), + token::Amount::zero(), |sum, (val_stake, validators)| { sum + *val_stake * validators.len() as u64 }, @@ -2895,14 +4571,13 @@ impl AbstractPosState { let enqueued_slashes = self.enqueued_slashes.get(&processing_epoch); if let Some(enqueued_slashes) = enqueued_slashes { for (validator, slashes) in enqueued_slashes.iter() { - let val_stake = token::Amount::from_change( - self.validator_stakes - .get(&epoch) - .unwrap() - .get(validator) - .cloned() - .unwrap_or_default(), - ); + let val_stake = self + .validator_stakes + .get(&epoch) + .unwrap() + .get(validator) + .cloned() + .unwrap_or_default(); tracing::debug!( "Val {} stake epoch {}: {}", &validator, @@ -2957,14 +4632,11 @@ impl AbstractPosState { deltas_stake.to_string_native(), val_state ); - debug_assert_eq!( - *amount, - token::Amount::from_change(*deltas_stake) - ); + debug_assert_eq!(*amount, *deltas_stake); debug_assert_eq!(*val_state, ValidatorState::Consensus); } } - let mut max_bc = token::Amount::default(); + let mut max_bc = token::Amount::zero(); let bc = self.below_capacity_set.get(&epoch).unwrap(); for (amount, vals) in bc { if token::Amount::from(*amount) > max_bc { @@ -2993,13 +4665,13 @@ impl AbstractPosState { ); debug_assert_eq!( token::Amount::from(*amount), - token::Amount::from_change(deltas_stake) + deltas_stake ); debug_assert_eq!(*val_state, ValidatorState::BelowCapacity); } } if max_bc > min_consensus { - println!( + tracing::debug!( "min_consensus = {}, max_bc = {}", min_consensus.to_string_native(), max_bc.to_string_native() @@ -3069,6 +4741,364 @@ impl AbstractPosState { } } } + + fn is_chained_redelegation( + current_epoch: Epoch, + params: &PosParams, + incoming_redelegations: &AbstractIncomingRedelegations, + delegator: &Address, + src_validator: &Address, + ) -> bool { + let src_incoming_redelegations = + incoming_redelegations.get(src_validator); + if let Some(incoming) = 
src_incoming_redelegations { + if let Some(redel_end_epoch) = incoming.get(delegator) { + return redel_end_epoch.prev() + + params.slash_processing_epoch_offset() + > current_epoch; + } + } + false + } + + fn find_bonds_to_remove( + bonds: &BTreeMap, + amount: token::Amount, + ) -> BondsForRemovalRes { + let mut bonds_for_removal = BondsForRemovalRes::default(); + let mut remaining = amount; + + for (&bond_epoch, &bond_amount) in bonds.iter().rev() { + let to_unbond = cmp::min(bond_amount, remaining); + if to_unbond == bond_amount { + bonds_for_removal.epochs.insert(bond_epoch); + } else { + bonds_for_removal.new_entry = + Some((bond_epoch, bond_amount - to_unbond)); + } + remaining -= to_unbond; + if remaining.is_zero() { + break; + } + } + bonds_for_removal + } + + fn compute_modified_redelegation( + delegator_redelegated_bonds: &mut BTreeMap< + Epoch, + BTreeMap>, + >, + bond_epoch: Epoch, + amount: token::Amount, + ) -> ModifiedRedelegation { + let mut modified_redelegation = ModifiedRedelegation::default(); + + let redelegated_bonds = + delegator_redelegated_bonds.entry(bond_epoch).or_default(); + let (src_validators, total_redelegated) = + redelegated_bonds.iter().fold( + (BTreeSet::
::new(), token::Amount::zero()), + |mut acc, (src_val, redel_bonds)| { + acc.0.insert(src_val.clone()); + acc.1 += redel_bonds + .values() + .fold(token::Amount::zero(), |sum, val| sum + *val); + acc + }, + ); + + modified_redelegation.epoch = Some(bond_epoch); + + if total_redelegated <= amount { + return modified_redelegation; + } + + let mut remaining = amount; + for src_val in src_validators { + if remaining.is_zero() { + break; + } + let bonds = redelegated_bonds.get(&src_val).unwrap(); + let total_src_amount = + bonds.values().cloned().sum::(); + + modified_redelegation + .validators_to_remove + .insert(src_val.clone()); + + if total_src_amount <= remaining { + remaining -= total_src_amount; + } else { + let src_bonds_to_remove = + Self::find_bonds_to_remove(bonds, remaining); + + remaining = token::Amount::zero(); + + if let Some((bond_epoch, new_bond_amount)) = + src_bonds_to_remove.new_entry + { + modified_redelegation.validator_to_modify = Some(src_val); + modified_redelegation.epochs_to_remove = { + let mut epochs = src_bonds_to_remove.epochs; + epochs.insert(bond_epoch); + epochs + }; + modified_redelegation.epoch_to_modify = Some(bond_epoch); + modified_redelegation.new_amount = Some(new_bond_amount); + } else { + modified_redelegation.validator_to_modify = Some(src_val); + modified_redelegation.epochs_to_remove = + src_bonds_to_remove.epochs; + } + } + } + + modified_redelegation + } + + fn compute_new_redelegated_unbonds( + redelegated_bonds: &mut BTreeMap< + Epoch, + BTreeMap>, + >, + epochs_to_remove: &BTreeSet, + modified_redelegation: &ModifiedRedelegation, + ) -> BTreeMap>> + { + let unbonded_epochs = if let Some(epoch) = modified_redelegation.epoch { + let mut epochs = epochs_to_remove.clone(); + epochs.insert(epoch); + epochs + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains_key(e)) + .collect::>() + } else { + epochs_to_remove + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains_key(e)) + .collect::>() + }; + + let 
new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs + .into_iter() + .map(|start| { + let mut rbonds = EagerRedelegatedBondsMap::default(); + if modified_redelegation + .epoch + .map(|redelegation_epoch| start != redelegation_epoch) + .unwrap_or(true) + || modified_redelegation.validators_to_remove.is_empty() + { + for (src_val, bonds) in + redelegated_bonds.get(&start).unwrap() + { + for (bond_epoch, bond_amount) in bonds { + rbonds + .entry(src_val.clone()) + .or_default() + .insert(*bond_epoch, *bond_amount); + } + } + (start, rbonds) + } else { + for src_validator in + &modified_redelegation.validators_to_remove + { + if modified_redelegation + .validator_to_modify + .as_ref() + .map(|validator| src_validator != validator) + .unwrap_or(true) + { + let raw_bonds = redelegated_bonds + .entry(start) + .or_default() + .entry(src_validator.clone()) + .or_default(); + for (bond_epoch, bond_amount) in raw_bonds { + rbonds + .entry(src_validator.clone()) + .or_default() + .insert(*bond_epoch, *bond_amount); + } + } else { + for bond_start in + &modified_redelegation.epochs_to_remove + { + let cur_redel_bond_amount = redelegated_bonds + .entry(start) + .or_default() + .entry(src_validator.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + + let raw_bonds = rbonds + .entry(src_validator.clone()) + .or_default(); + if modified_redelegation + .epoch_to_modify + .as_ref() + .map(|epoch| bond_start != epoch) + .unwrap_or(true) + { + raw_bonds.insert( + *bond_start, + *cur_redel_bond_amount, + ); + } else { + raw_bonds.insert( + *bond_start, + *cur_redel_bond_amount + - modified_redelegation + .new_amount + // Safe unwrap - it shouldn't + // get to + // this if it's None + .unwrap(), + ); + } + } + } + } + (start, rbonds) + } + }) + .collect(); + new_redelegated_unbonds + } + + fn compute_amount_after_slashing_unbond( + params: &PosParams, + all_slashes: &BTreeMap>, + validator: &Address, + new_unbonds: &BTreeMap<(Epoch, Epoch), token::Amount>, + 
new_redelegated_unbonded: &BTreeMap< + Epoch, + BTreeMap>, + >, + ) -> ResultSlashing { + let mut result_slashing = ResultSlashing::default(); + let validator_slashes = + all_slashes.get(validator).cloned().unwrap_or_default(); + for ((start_epoch, _withdraw_epoch), to_unbond) in new_unbonds { + let slashes = validator_slashes + .iter() + .filter(|&s| s.epoch >= *start_epoch) + .cloned() + .collect::>(); + + // Begin the logic for `fold_and_slash_redelegated_bonds` + let result_fold = { + let (mut total_redelegated, mut total_after_slashing) = + (token::Amount::zero(), token::Amount::zero()); + + for (src_validator, unbonded_map) in new_redelegated_unbonded + .get(start_epoch) + .cloned() + .unwrap_or_default() + { + for (bond_start, unbonded) in unbonded_map { + let src_slashes = all_slashes + .get(&src_validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&s| { + params.in_redelegation_slashing_window( + s.epoch, + params.redelegation_start_epoch_from_end( + *start_epoch, + ), + *start_epoch, + ) && bond_start <= s.epoch + }) + .cloned() + .collect::>(); + + let mut merged = slashes + .iter() + .chain(src_slashes.iter()) + .cloned() + .collect::>(); + merged.sort_by(|s1, s2| { + s1.epoch.partial_cmp(&s2.epoch).unwrap() + }); + + total_redelegated += unbonded; + total_after_slashing += Self::apply_slashes_to_amount( + params, &merged, unbonded, + ); + } + } + + FoldRedelegatedBondsResult { + total_redelegated, + total_after_slashing, + } + }; + + let total_not_redelegated = + *to_unbond - result_fold.total_redelegated; + let after_not_redelegated = Self::apply_slashes_to_amount( + params, + &slashes, + total_not_redelegated, + ); + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(*start_epoch, amount_after_slashing); + } + + result_slashing + } + + fn apply_slashes_to_amount( + params: &PosParams, + slashes: &[Slash], + 
amount: token::Amount, + ) -> token::Amount { + let mut final_amount = amount; + let mut computed_slashes = BTreeMap::::new(); + for slash in slashes { + let slashed_amount = Self::compute_slashable_amount( + params, + slash, + amount, + &computed_slashes, + ); + final_amount = + final_amount.checked_sub(slashed_amount).unwrap_or_default(); + + computed_slashes.insert(slash.epoch, slashed_amount); + } + final_amount + } + + fn compute_slashable_amount( + params: &PosParams, + slash: &Slash, + amount: token::Amount, + computed_slashes: &BTreeMap, + ) -> token::Amount { + let updated_amount = computed_slashes + .iter() + .filter(|(&epoch, _)| { + // TODO: check if bounds correct! + // slashes that have already been applied and processed + epoch + params.slash_processing_epoch_offset() <= slash.epoch + }) + .fold(amount, |acc, (_, amnt)| { + acc.checked_sub(*amnt).unwrap_or_default() + }); + updated_amount.mul_ceil(slash.rate) + } } /// Arbitrary bond transition that adds tokens to an existing bond @@ -3175,44 +5205,3 @@ fn arb_slash(state: &AbstractPosState) -> impl Strategy { }, ) } - -fn compute_amount_after_slashing( - slashes: &BTreeMap, - amount: token::Amount, - unbonding_len: u64, - cubic_slash_window_len: u64, -) -> token::Amount { - let mut computed_amounts = Vec::::new(); - let mut updated_amount = amount; - - for (infraction_epoch, slash_rate) in slashes { - let mut indices_to_remove = BTreeSet::::new(); - - for (idx, slashed_amount) in computed_amounts.iter().enumerate() { - if slashed_amount.epoch + unbonding_len + cubic_slash_window_len - < *infraction_epoch - { - updated_amount = updated_amount - .checked_sub(slashed_amount.amount) - .unwrap_or_default(); - indices_to_remove.insert(idx); - } - } - for idx in indices_to_remove.into_iter().rev() { - computed_amounts.remove(idx); - } - computed_amounts.push(SlashedAmount { - amount: *slash_rate * updated_amount, - epoch: *infraction_epoch, - }); - } - updated_amount - .checked_sub( - computed_amounts - 
.iter() - .fold(token::Amount::default(), |sum, computed| { - sum + computed.amount - }), - ) - .unwrap_or_default() -} diff --git a/proof_of_stake/src/tests/state_machine_v2.rs b/proof_of_stake/src/tests/state_machine_v2.rs new file mode 100644 index 0000000000..ce2e0e6817 --- /dev/null +++ b/proof_of_stake/src/tests/state_machine_v2.rs @@ -0,0 +1,4584 @@ +//! Test PoS transitions with a state machine + +use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::ops::{AddAssign, Deref}; +use std::{cmp, mem}; + +use assert_matches::assert_matches; +use derivative::Derivative; +use itertools::Itertools; +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::{ + NestedSubKey, SubKey, +}; +use namada_core::ledger::storage_api::token::read_balance; +use namada_core::ledger::storage_api::{token, StorageRead}; +use namada_core::types::address::{self, Address}; +use namada_core::types::dec::Dec; +use namada_core::types::key; +use namada_core::types::key::common::PublicKey; +use namada_core::types::storage::Epoch; +use namada_core::types::token::Change; +use proptest::prelude::*; +use proptest::test_runner::Config; +use proptest_state_machine::{ + prop_state_machine, ReferenceStateMachine, StateMachineTest, +}; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; +use yansi::Paint; + +use super::utils::DbgPrintDiff; +use crate::parameters::testing::arb_rate; +use crate::parameters::PosParams; +use crate::tests::arb_params_and_genesis_validators; +use crate::tests::utils::pause_for_enter; +use crate::types::{ + BondId, GenesisValidator, ReverseOrdTokenAmount, Slash, SlashType, + ValidatorState, WeightedValidator, +}; +use crate::{ + below_capacity_validator_set_handle, bond_handle, + consensus_validator_set_handle, delegator_redelegated_bonds_handle, + enqueued_slashes_handle, find_slashes_in_range, + 
read_below_threshold_validator_set_addresses, read_pos_params, + redelegate_tokens, validator_deltas_handle, validator_slashes_handle, + validator_state_handle, RedelegationError, +}; + +prop_state_machine! { + #![proptest_config(Config { + cases: 2, + .. Config::default() + })] + #[ignore] + #[test] + /// A `StateMachineTest` implemented on `PosState` + fn pos_state_machine_test_v2(sequential 1000 => ConcretePosState); +} + +/// Abstract representation of a state of PoS system +#[derive(Clone, Derivative)] +#[derivative(Debug)] +struct AbstractPosState { + /// Current epoch + epoch: Epoch, + /// Parameters + params: PosParams, + /// Genesis validators + #[derivative(Debug = "ignore")] + genesis_validators: Vec, + /// Records of bonds, unbonds, withdrawal and redelegations with slashes, + /// if any + validator_records: BTreeMap, + /// Validator stakes. These are NOT deltas. + /// Pipelined. + validator_stakes: BTreeMap>, + /// Consensus validator set. Pipelined. + consensus_set: BTreeMap>>, + /// Below-capacity validator set. Pipelined. + below_capacity_set: + BTreeMap>>, + /// Below-threshold validator set. Pipelined. + below_threshold_set: BTreeMap>, + /// Validator states. Pipelined. 
+ validator_states: BTreeMap>, + /// Validator slashes post-processing + validator_slashes: BTreeMap>, + /// Enqueued slashes pre-processing + enqueued_slashes: BTreeMap>>, + /// The last epoch in which a validator committed an infraction + validator_last_slash_epochs: BTreeMap, +} + +impl AbstractPosState { + /// Copy validator sets and validator states at the given epoch from its + /// predecessor + fn copy_discrete_epoched_data(&mut self, epoch: Epoch) { + let prev_epoch = epoch.prev(); + // Copy the non-delta data from the last epoch into the new one + self.consensus_set.insert( + epoch, + self.consensus_set.get(&prev_epoch).unwrap().clone(), + ); + self.below_capacity_set.insert( + epoch, + self.below_capacity_set.get(&prev_epoch).unwrap().clone(), + ); + self.below_threshold_set.insert( + epoch, + self.below_threshold_set.get(&prev_epoch).unwrap().clone(), + ); + self.validator_states.insert( + epoch, + self.validator_states.get(&prev_epoch).unwrap().clone(), + ); + self.validator_stakes.insert( + epoch, + self.validator_stakes.get(&prev_epoch).unwrap().clone(), + ); + } + + /// Add a bond. + fn bond( + &mut self, + BondId { source, validator }: &BondId, + amount: token::Amount, + ) { + let start = self.pipeline(); + + let records = self.records_mut(validator, source); + let bond_at_start = records.bonds.entry(start).or_default(); + bond_at_start.tokens.amount += amount; + + let change = amount.change(); + let pipeline_state = self + .validator_states + .get(&start) + .unwrap() + .get(validator) + .unwrap(); + // Validator sets need to be updated before total stake + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets(validator, change, self.pipeline()); + } + self.update_validator_total_stake(validator, change, self.pipeline()); + } + + /// Unbond a bond. 
+ fn unbond( + &mut self, + BondId { source, validator }: &BondId, + amount: token::Amount, + ) { + // Last epoch in which it contributes to stake + let end = self.pipeline().prev(); + let withdrawable_epoch = + self.epoch + self.params.withdrawable_epoch_offset(); + let pipeline_len = self.params.pipeline_len; + + let records = self.records_mut(validator, source); + // The amount requested is before any slashing that may be applicable + let mut to_unbond = amount; + let mut amount_after_slashing = token::Amount::zero(); + + 'bonds_iter: for (&start, bond) in records.bonds.iter_mut().rev() { + // In every loop, try to unbond redelegations first. We have to + // go in reverse order of the start epoch to match the order of + // unbond in the implementation. + for (dest_validator, redelegs) in bond.incoming_redelegs.iter_mut() + { + let _redeleg_epoch = start - pipeline_len; + + for (&src_bond_start, redeleg) in + redelegs.tokens.iter_mut().rev() + { + let amount_before_slashing = + redeleg.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += redeleg.amount; + + mem::take(redeleg) + } else { + // We have to divide this bond in case there are slashes + let unbond_slash = + to_unbond.mul_ceil(redeleg.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + redeleg.amount -= to_unbond_after_slash; + let removed_slashes = + redeleg.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let unbond = + bond.unbonds.entry(end).or_insert_with(|| Unbond { + withdrawable_epoch, + tokens: Default::default(), + incoming_redelegs: Default::default(), + }); + debug_assert_eq!( + unbond.withdrawable_epoch, + withdrawable_epoch + ); + let redeleg_unbond = unbond + 
.incoming_redelegs + .entry(dest_validator.clone()) + .or_default(); + let redeleg_unbond_tokens = redeleg_unbond + .tokens + .entry(src_bond_start) + .or_default(); + redeleg_unbond_tokens.amount += unbonded.amount; + redeleg_unbond_tokens.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break 'bonds_iter; + } + } + } + + // Then try to unbond regular bonds + if !to_unbond.is_zero() { + let amount_before_slashing = + bond.tokens.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += bond.tokens.amount; + + mem::take(&mut bond.tokens) + } else { + // We have to divide this bond in case there are slashes + let unbond_slash = + to_unbond.mul_ceil(bond.tokens.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + bond.tokens.amount -= to_unbond_after_slash; + let removed_slashes = + bond.tokens.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let unbond = + bond.unbonds.entry(end).or_insert_with(|| Unbond { + withdrawable_epoch, + tokens: Default::default(), + incoming_redelegs: Default::default(), + }); + debug_assert_eq!(unbond.withdrawable_epoch, withdrawable_epoch); + unbond.tokens.amount += unbonded.amount; + unbond.tokens.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break; + } + } + } + assert!(to_unbond.is_zero()); + + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(validator) + .unwrap(); + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + validator, + 
-amount_after_slashing.change(), + self.pipeline(), + ); + } + + /// Redelegate a bond. + fn redelegate( + &mut self, + BondId { source, validator }: &BondId, + new_validator: &Address, + amount: token::Amount, + ) { + // Last epoch in which it contributes to stake of thhe source validator + let current_epoch = self.epoch; + let pipeline = self.pipeline(); + let src_end = pipeline.prev(); + let withdrawable_epoch_offset = self.params.withdrawable_epoch_offset(); + let pipeline_len = self.params.pipeline_len; + + let records = self.records_mut(validator, source); + + // The amount requested is before any slashing that may be applicable + let mut to_unbond = amount; + let mut amount_after_slashing = token::Amount::zero(); + // Keyed by redelegation src bond start epoch + let mut dest_incoming_redelegs = + BTreeMap::::new(); + + 'bonds_iter: for (&start, bond) in records.bonds.iter_mut().rev() { + // In every loop, try to redelegate redelegations first. We have to + // go in reverse order of the start epoch to match the order of + // redelegation in the implementation. 
+ for (_src_validator, redelegs) in + bond.incoming_redelegs.iter_mut().rev() + { + let _redeleg_epoch = start - pipeline_len; + + for (_src_bond_start, redeleg) in + redelegs.tokens.iter_mut().rev() + { + let amount_before_slashing = + redeleg.amount_before_slashing(); + + // No chained redelegations + if Epoch( + start.0.checked_sub(pipeline_len).unwrap_or_default(), + ) + withdrawable_epoch_offset + <= current_epoch + { + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += redeleg.amount; + + mem::take(redeleg) + } else { + // We have to divide this bond in case there are + // slashes + let unbond_slash = + to_unbond.mul_ceil(redeleg.slash_rates_sum()); + let to_unbond_after_slash = + to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + redeleg.amount -= to_unbond_after_slash; + let removed_slashes = + redeleg.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let outgoing_redeleg = bond + .outgoing_redelegs + .entry(src_end) + .or_default() + .entry(new_validator.clone()) + .or_default(); + + outgoing_redeleg.amount += unbonded.amount; + outgoing_redeleg.add_slashes(&unbonded.slashes); + + let redeleg = + dest_incoming_redelegs.entry(start).or_default(); + redeleg.amount += unbonded.amount; + redeleg.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break 'bonds_iter; + } + } + } + } + + // Then try to redelegate regular bonds + if !to_unbond.is_zero() { + let amount_before_slashing = + bond.tokens.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += bond.tokens.amount; + + mem::take(&mut bond.tokens) + } else { + // We have to divide this bond in case there 
are slashes + let unbond_slash = + to_unbond.mul_ceil(bond.tokens.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + bond.tokens.amount -= to_unbond_after_slash; + let removed_slashes = + bond.tokens.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let outgoing_redeleg = bond + .outgoing_redelegs + .entry(src_end) + .or_default() + .entry(new_validator.clone()) + .or_default(); + outgoing_redeleg.amount += unbonded.amount; + outgoing_redeleg.add_slashes(&unbonded.slashes); + let dest_incoming_redeleg = + dest_incoming_redelegs.entry(start).or_default(); + dest_incoming_redeleg.amount += unbonded.amount; + dest_incoming_redeleg.add_slashes(&unbonded.slashes); + } + // Stop once all is unbonded + if to_unbond.is_zero() { + break; + } + } + assert!(to_unbond.is_zero()); + + // Record the incoming redelegations on destination validator + let dest_records = self.records_mut(new_validator, source); + let redeleg = dest_records + .bonds + .entry(pipeline) + .or_default() + .incoming_redelegs + .entry(validator.clone()) + .or_default(); + for (start, inc_redeleg) in dest_incoming_redelegs { + let redeleg_tokens = redeleg.tokens.entry(start).or_default(); + redeleg_tokens.amount += inc_redeleg.amount; + redeleg_tokens.add_slashes(&inc_redeleg.slashes); + } + + // Update stake of src validator + let src_pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(validator) + .unwrap(); + if *src_pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + + // Update stake of dest validator + let dest_pipeline_state = self + .validator_states + 
.get(&self.pipeline()) + .unwrap() + .get(new_validator) + .unwrap(); + if *dest_pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + new_validator, + amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + new_validator, + amount_after_slashing.change(), + self.pipeline(), + ); + } + + /// Withdraw all unbonds that can be withdrawn. + fn withdraw(&mut self, BondId { source, validator }: &BondId) { + let epoch = self.epoch; + let records = self.records_mut(validator, source); + let mut to_store = BTreeMap::::new(); + for (_start, bond) in records.bonds.iter_mut() { + bond.unbonds.retain(|_end, unbond| { + let is_withdrawable = unbond.withdrawable_epoch <= epoch; + if is_withdrawable { + let withdrawn = to_store.entry(epoch).or_default(); + withdrawn.amount += unbond.tokens.amount; + withdrawn.add_slashes(&unbond.tokens.slashes); + for redeleg in unbond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + withdrawn.amount += tokens.amount; + withdrawn.add_slashes(&tokens.slashes); + } + } + } + !is_withdrawable + }) + } + records.withdrawn.extend(to_store.into_iter()); + } + + /// Get or insert default mutable records + fn records_mut( + &mut self, + validator: &Address, + source: &Address, + ) -> &mut Records { + self.validator_records + .entry(validator.clone()) + .or_default() + .per_source + .entry(source.clone()) + .or_default() + } + + /// Get records + fn records( + &self, + validator: &Address, + source: &Address, + ) -> Option<&Records> { + self.validator_records + .get(validator) + .and_then(|records| records.per_source.get(source)) + } + + /// Update validator's total stake with bonded or unbonded change at the + /// pipeline epoch + fn update_validator_total_stake( + &mut self, + validator: &Address, + change: token::Change, + epoch: Epoch, + ) { + let total_stakes = self + .validator_stakes + .entry(epoch) + .or_default() + .entry(validator.clone()) + .or_default(); + 
tracing::debug!("TOTAL {validator} stakes before {}", total_stakes); + *total_stakes += change; + tracing::debug!("TOTAL {validator} stakes after {}", total_stakes); + } + + /// Update validator in sets with bonded or unbonded change (should be + /// called with epoch at pipeline) or slashes. + fn update_validator_sets( + &mut self, + validator: &Address, + change: token::Change, + epoch: Epoch, + ) { + let consensus_set = self.consensus_set.entry(epoch).or_default(); + let below_cap_set = self.below_capacity_set.entry(epoch).or_default(); + let below_thresh_set = + self.below_threshold_set.entry(epoch).or_default(); + + let validator_stakes = self.validator_stakes.get(&epoch).unwrap(); + let validator_states = self.validator_states.get_mut(&epoch).unwrap(); + + let state_pre = validator_states.get(validator).unwrap(); + + let this_val_stake_pre = *validator_stakes.get(validator).unwrap(); + let this_val_stake_post = + token::Amount::from_change(this_val_stake_pre + change); + let this_val_stake_pre = token::Amount::from_change(this_val_stake_pre); + + let threshold = self.params.validator_stake_threshold; + if this_val_stake_pre < threshold && this_val_stake_post < threshold { + // Validator is already below-threshold and will remain there, so do + // nothing + debug_assert!(below_thresh_set.contains(validator)); + return; + } + + match state_pre { + ValidatorState::Consensus => { + // tracing::debug!("Validator initially in consensus"); + // Remove from the prior stake + let vals = consensus_set.entry(this_val_stake_pre).or_default(); + // dbg!(&vals); + vals.retain(|addr| addr != validator); + // dbg!(&vals); + + if vals.is_empty() { + consensus_set.remove(&this_val_stake_pre); + } + + // If posterior stake is below threshold, place into the + // below-threshold set + if this_val_stake_post < threshold { + below_thresh_set.insert(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowThreshold, + ); + + // Promote the next 
below-cap validator if there is one + if let Some(mut max_below_cap) = below_cap_set.last_entry() + { + let max_below_cap_stake = *max_below_cap.key(); + let vals = max_below_cap.get_mut(); + let promoted_val = vals.pop_front().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + below_cap_set.remove(&max_below_cap_stake); + } + + consensus_set + .entry(max_below_cap_stake.0) + .or_default() + .push_back(promoted_val.clone()); + validator_states + .insert(promoted_val, ValidatorState::Consensus); + } + + return; + } + + // If unbonding, check the max below-cap validator's state if we + // need to do a swap + if change < token::Change::zero() { + if let Some(mut max_below_cap) = below_cap_set.last_entry() + { + let max_below_cap_stake = *max_below_cap.key(); + if max_below_cap_stake.0 > this_val_stake_post { + // Swap this validator with the max below-cap + let vals = max_below_cap.get_mut(); + let first_val = vals.pop_front().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + below_cap_set.remove(&max_below_cap_stake); + } + // Do the swap in the validator sets + consensus_set + .entry(max_below_cap_stake.0) + .or_default() + .push_back(first_val.clone()); + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states + .insert(first_val, ValidatorState::Consensus); + validator_states.insert( + validator.clone(), + ValidatorState::BelowCapacity, + ); + + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::BelowCapacity => { + // tracing::debug!("Validator initially in below-cap"); + + // Remove from the prior stake + let vals = + below_cap_set.entry(this_val_stake_pre.into()).or_default(); + vals.retain(|addr| addr != validator); + if vals.is_empty() { + 
below_cap_set.remove(&this_val_stake_pre.into()); + } + + // If posterior stake is below threshold, place into the + // below-threshold set + if this_val_stake_post < threshold { + below_thresh_set.insert(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowThreshold, + ); + return; + } + + // If bonding, check the min consensus validator's state if we + // need to do a swap + if change >= token::Change::zero() { + // dbg!(&consensus_set); + if let Some(mut min_consensus) = consensus_set.first_entry() + { + // dbg!(&min_consensus); + let min_consensus_stake = *min_consensus.key(); + if this_val_stake_post > min_consensus_stake { + // Swap this validator with the max consensus + let vals = min_consensus.get_mut(); + let last_val = vals.pop_back().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + consensus_set.remove(&min_consensus_stake); + } + // Do the swap in the validator sets + below_cap_set + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states.insert( + validator.clone(), + ValidatorState::Consensus, + ); + validator_states.insert( + last_val, + ValidatorState::BelowCapacity, + ); + + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::BelowThreshold => { + // We know that this validator will be promoted into one of the + // higher sets, so first remove from the below-threshold set. 
+ below_thresh_set.remove(validator); + + let num_consensus = + consensus_set.iter().fold(0, |sum, (_, validators)| { + sum + validators.len() as u64 + }); + if num_consensus < self.params.max_validator_slots { + // Place the validator directly into the consensus set + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + validator_states + .insert(validator.clone(), ValidatorState::Consensus); + return; + } + // Determine which set to place the validator into + if let Some(mut min_consensus) = consensus_set.first_entry() { + // dbg!(&min_consensus); + let min_consensus_stake = *min_consensus.key(); + if this_val_stake_post > min_consensus_stake { + // Swap this validator with the max consensus + let vals = min_consensus.get_mut(); + let last_val = vals.pop_back().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + consensus_set.remove(&min_consensus_stake); + } + // Do the swap in the validator sets + below_cap_set + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states.insert( + validator.clone(), + ValidatorState::Consensus, + ); + validator_states + .insert(last_val, ValidatorState::BelowCapacity); + } else { + // Place the validator into the below-capacity set + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowCapacity, + ); + } + } + } + ValidatorState::Inactive => { + panic!("unexpected state") + } + ValidatorState::Jailed => { + panic!("unexpected state (jailed)") + } + } + } + + fn process_enqueued_slashes(&mut self) { + let slashes_this_epoch = self + .enqueued_slashes + .get(&self.epoch) + .cloned() + .unwrap_or_default(); + if !slashes_this_epoch.is_empty() { + let infraction_epoch = 
self.epoch + - self.params.unbonding_len + - self.params.cubic_slashing_window_length + - 1; + + let cubic_rate = self.cubic_slash_rate(); + for (validator, slashes) in slashes_this_epoch { + // Slash this validator on it's full stake at infration + self.slash_a_validator( + &validator, + &slashes, + infraction_epoch, + cubic_rate, + ); + } + } + } + + fn slash_a_validator( + &mut self, + validator: &Address, + slashes: &[Slash], + infraction_epoch: Epoch, + cubic_rate: Dec, + ) { + let current_epoch = self.epoch; + let mut total_rate = Dec::zero(); + + for slash in slashes { + debug_assert_eq!(slash.epoch, infraction_epoch); + let rate = + cmp::max(slash.r#type.get_slash_rate(&self.params), cubic_rate); + let processed_slash = Slash { + epoch: slash.epoch, + block_height: slash.block_height, + r#type: slash.r#type, + rate, + }; + let cur_slashes = + self.validator_slashes.entry(validator.clone()).or_default(); + cur_slashes.push(processed_slash.clone()); + + total_rate += rate; + } + total_rate = cmp::min(total_rate, Dec::one()); + tracing::debug!("Total rate: {}", total_rate); + + // Find validator stakes before slashing for up to pipeline epoch + let mut validator_stakes_pre = + BTreeMap::>::new(); + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator, records) in &self.validator_records { + let stake = records.stake(epoch); + validator_stakes_pre + .entry(epoch) + .or_default() + .insert(validator.clone(), stake); + } + } + + let mut redelegations_to_slash = BTreeMap::< + Address, + BTreeMap>>, + >::new(); + for (addr, records) in self.validator_records.iter_mut() { + if addr == validator { + for (source, records) in records.per_source.iter_mut() { + // Apply slashes on non-redelegated bonds + records.slash(total_rate, infraction_epoch, current_epoch); + + // Slash tokens in the outgoing redelegation records for + // this validator + for (&start, bond) in records.bonds.iter_mut() { + for (&end, redelegs) in + 
bond.outgoing_redelegs.iter_mut() + { + if start <= infraction_epoch + && end >= infraction_epoch + { + for (dest, tokens) in redelegs.iter_mut() { + let slashed = tokens.slash( + total_rate, + infraction_epoch, + current_epoch, + ); + // Store the redelegation slashes to apply + // on destination validator + *redelegations_to_slash + .entry(dest.clone()) + .or_default() + .entry(source.clone()) + .or_default() + .entry( + // start epoch of redelegation + end.next(), + ) + .or_default() + // redelegation src bond start epoch + .entry(start) + .or_default() += TokensSlash { + amount: slashed, + rate: total_rate, + }; + } + } + } + } + } + } + } + // Apply redelegation slashes on destination validator + for (dest_validator, redelegations) in redelegations_to_slash { + for (source, tokens) in redelegations { + for (redelegation_start, slashes) in tokens { + for (src_bond_start, slash) in slashes { + let records = self + .validator_records + .get_mut(&dest_validator) + .unwrap() + .per_source + .get_mut(&source) + .unwrap(); + records.subtract_redelegation_slash( + validator, + src_bond_start, + redelegation_start, + slash, + current_epoch, + ); + } + } + } + } + + // Find validator stakes after slashing for up to pipeline epoch + let mut validator_stakes_post = + BTreeMap::>::new(); + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator, records) in &self.validator_records { + let stake = records.stake(epoch); + validator_stakes_post + .entry(epoch) + .or_default() + .insert(validator.clone(), stake); + } + } + + // Apply the difference in stakes to validator_stakes, states and deltas + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator_to_update, &stake_post) in + validator_stakes_post.get(&epoch).unwrap() + { + let stake_pre = validator_stakes_pre + .get(&epoch) + .unwrap() + .get(validator_to_update) + .cloned() + .unwrap_or_default(); + let change = stake_post.change() - 
stake_pre.change(); + + if !change.is_zero() { + let state = self + .validator_states + .get(&epoch) + .unwrap() + .get(validator_to_update) + .unwrap(); + // Validator sets need to be updated before total + // stake + if *state != ValidatorState::Jailed { + self.update_validator_sets( + validator_to_update, + change, + epoch, + ); + } + self.update_validator_total_stake( + validator_to_update, + change, + epoch, + ); + } + } + } + } + + /// Get the pipeline epoch + fn pipeline(&self) -> Epoch { + self.epoch + self.params.pipeline_len + } + + /// Check if the given address is of a known validator + fn is_validator(&self, validator: &Address, epoch: Epoch) -> bool { + self.validator_states + .get(&epoch) + .unwrap() + .keys() + .any(|val| val == validator) + } + + fn is_in_consensus_w_info( + &self, + validator: &Address, + epoch: Epoch, + ) -> Option<(usize, token::Amount)> { + for (stake, vals) in self.consensus_set.get(&epoch).unwrap() { + if let Some(index) = vals.iter().position(|val| val == validator) { + return Some((index, *stake)); + } + } + None + } + + fn is_in_below_capacity_w_info( + &self, + validator: &Address, + epoch: Epoch, + ) -> Option<(usize, token::Amount)> { + for (stake, vals) in self.below_capacity_set.get(&epoch).unwrap() { + if let Some(index) = vals.iter().position(|val| val == validator) { + return Some((index, (*stake).into())); + } + } + None + } + + fn is_in_below_threshold(&self, validator: &Address, epoch: Epoch) -> bool { + self.below_threshold_set + .get(&epoch) + .unwrap() + .iter() + .any(|val| val == validator) + } + + /// Find the sum of bonds that can be unbonded. The returned amounts are + /// prior to slashing. 
+ fn unbondable_bonds(&self) -> BTreeMap { + let mut sums = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, record) in &records.per_source { + let unbondable = sums + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add bonds and incoming redelegations + for (&start, bond) in &record.bonds { + *unbondable += bond.tokens.amount_before_slashing(); + for redeleg in bond.incoming_redelegs.values() { + let redeleg_epoch = start - self.params.pipeline_len; + *unbondable += redeleg + .amount_before_slashing_after_redeleg( + redeleg_epoch, + ); + } + } + } + } + // Filter out any 0s. + sums.retain(|_id, tokens| !tokens.is_zero()); + sums + } + + /// Find the sum of bonds that can be redelegated. The returned amounts are + /// prior to slashing. + fn redelegatable_bonds(&self) -> BTreeMap { + let mut sums = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, record) in &records.per_source { + // Self-bonds cannot be redelegated + if validator != source { + let unbondable = sums + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add bonds + for (&start, bond) in &record.bonds { + *unbondable += bond.tokens.amount_before_slashing(); + // Add redelegations + for redeleg in bond.incoming_redelegs.values() { + // No chained redelegations + if Epoch( + start + .0 + .checked_sub(self.params.pipeline_len) + .unwrap_or_default(), + ) + self.params.withdrawable_epoch_offset() + <= self.epoch + { + *unbondable += redeleg.amount_before_slashing(); + } + } + } + } + } + } + // Filter out any 0s. 
+ sums.retain(|_id, tokens| !tokens.is_zero()); + sums + } + + fn unchainable_redelegations(&self) -> BTreeSet { + let mut unchainable = BTreeSet::new(); + for records in self.validator_records.values() { + for (owner, records) in &records.per_source { + for bond in records.bonds.values() { + for (&end, redelegs) in &bond.outgoing_redelegs { + // If the outgoing redelegation is still slashable for + // source validator ... + if end + self.params.slash_processing_epoch_offset() + > self.epoch + { + // ... it cannot be redelegated for now + for (dest_validator, tokens) in redelegs { + if !tokens.is_zero() { + unchainable.insert(BondId { + source: owner.clone(), + validator: dest_validator.clone(), + }); + } + } + } + } + } + } + } + unchainable + } + + /// Find the sums of withdrawable unbonds + fn withdrawable_unbonds(&self) -> BTreeMap { + let mut withdrawable = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, records) in &records.per_source { + for bond in records.bonds.values() { + for unbond in bond.unbonds.values() { + if unbond.withdrawable_epoch <= self.epoch { + let entry = withdrawable + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add withdrawable unbonds including redelegations + *entry += unbond.amount_before_slashing(); + } + } + } + } + } + withdrawable + } + + fn existing_bond_ids(&self) -> Vec { + let mut ids = Vec::new(); + for (validator, records) in &self.validator_records { + for source in records.per_source.keys() { + ids.push(BondId { + source: source.clone(), + validator: validator.clone(), + }); + } + } + ids + } + + /// Compute the cubic slashing rate for the current epoch + fn cubic_slash_rate(&self) -> Dec { + let infraction_epoch = self.epoch + - self.params.unbonding_len + - 1_u64 + - self.params.cubic_slashing_window_length; + tracing::debug!("Infraction epoch: {}", infraction_epoch); + let window_width = 
self.params.cubic_slashing_window_length; + let epoch_start = Epoch::from( + infraction_epoch + .0 + .checked_sub(window_width) + .unwrap_or_default(), + ); + let epoch_end = infraction_epoch + window_width; + + // Calculate cubic slashing rate with the abstract state + let mut vp_frac_sum = Dec::zero(); + for epoch in Epoch::iter_bounds_inclusive(epoch_start, epoch_end) { + let consensus_stake = + self.consensus_set.get(&epoch).unwrap().iter().fold( + token::Amount::zero(), + |sum, (val_stake, validators)| { + sum + *val_stake * validators.len() as u64 + }, + ); + tracing::debug!( + "Consensus stake in epoch {}: {}", + epoch, + consensus_stake.to_string_native() + ); + + let processing_epoch = epoch + + self.params.unbonding_len + + 1_u64 + + self.params.cubic_slashing_window_length; + let enqueued_slashes = self.enqueued_slashes.get(&processing_epoch); + if let Some(enqueued_slashes) = enqueued_slashes { + for (validator, slashes) in enqueued_slashes.iter() { + let val_stake = token::Amount::from_change( + self.validator_stakes + .get(&epoch) + .unwrap() + .get(validator) + .cloned() + .unwrap_or_default(), + ); + tracing::debug!( + "Val {} stake epoch {}: {}", + &validator, + epoch, + val_stake.to_string_native(), + ); + vp_frac_sum += Dec::from(slashes.len()) + * Dec::from(val_stake) + / Dec::from(consensus_stake); + } + } + } + let vp_frac_sum = cmp::min(Dec::one(), vp_frac_sum); + tracing::debug!("vp_frac_sum: {}", vp_frac_sum); + + cmp::min( + Dec::new(9, 0).unwrap() * vp_frac_sum * vp_frac_sum, + Dec::one(), + ) + } + + fn debug_validators(&self) { + let current_epoch = self.epoch; + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + let mut min_consensus = token::Amount::from(u64::MAX); + let consensus = self.consensus_set.get(&epoch).unwrap(); + for (amount, vals) in consensus { + if *amount < min_consensus { + min_consensus = *amount; + } + for val in vals { + let deltas_stake = self + .validator_stakes + .get(&epoch) + 
.unwrap() + .get(val) + .unwrap(); + let val_state = self + .validator_states + .get(&epoch) + .unwrap() + .get(val) + .unwrap(); + debug_assert_eq!( + *amount, + token::Amount::from_change(*deltas_stake) + ); + debug_assert_eq!(*val_state, ValidatorState::Consensus); + } + } + let mut max_bc = token::Amount::zero(); + let bc = self.below_capacity_set.get(&epoch).unwrap(); + for (amount, vals) in bc { + if token::Amount::from(*amount) > max_bc { + max_bc = token::Amount::from(*amount); + } + for val in vals { + let deltas_stake = self + .validator_stakes + .get(&epoch) + .unwrap() + .get(val) + .cloned() + .unwrap_or_default(); + let val_state = self + .validator_states + .get(&epoch) + .unwrap() + .get(val) + .unwrap(); + debug_assert_eq!( + token::Amount::from(*amount), + token::Amount::from_change(deltas_stake) + ); + debug_assert_eq!(*val_state, ValidatorState::BelowCapacity); + } + } + if max_bc > min_consensus { + tracing::debug!( + "min_consensus = {}, max_bc = {}", + min_consensus.to_string_native(), + max_bc.to_string_native() + ); + } + assert!(min_consensus >= max_bc); + + for addr in self.below_threshold_set.get(&epoch).unwrap() { + let state = self + .validator_states + .get(&epoch) + .unwrap() + .get(addr) + .unwrap(); + + assert_eq!(*state, ValidatorState::BelowThreshold); + } + + for addr in self + .validator_states + .get(&epoch) + .unwrap() + .keys() + .cloned() + .collect::>() + { + if let (None, None, false) = ( + self.is_in_consensus_w_info(&addr, epoch), + self.is_in_below_capacity_w_info(&addr, epoch), + self.is_in_below_threshold(&addr, epoch), + ) { + assert_eq!( + self.validator_states + .get(&epoch) + .unwrap() + .get(&addr) + .cloned(), + Some(ValidatorState::Jailed) + ); + } + } + } + } + + fn is_chained_redelegation( + unchainable_redelegations: &BTreeSet, + delegator: &Address, + src_validator: &Address, + ) -> bool { + unchainable_redelegations.contains(&BondId { + source: delegator.clone(), + validator: src_validator.clone(), + }) + 
} +} + +#[derive(Clone, Debug, Default)] +struct ValidatorRecords { + /// All records to a validator that contribute to its + /// [`ValidatorBonds::stake`]. For self-bonds the key is a validator + /// and for delegations a delegator. + per_source: BTreeMap, +} + +impl ValidatorRecords { + /// Validator's stake is a sum of bond amounts with any slashing applied. + fn stake(&self, epoch: Epoch) -> token::Amount { + let mut total = token::Amount::zero(); + for bonds in self.per_source.values() { + total += bonds.amount(epoch); + } + total + } + + /// Find how much slash rounding error at most can be tolerated for slashes + /// that were processed before or at the given epoch on a total validator's + /// stake vs sum of slashes on bond deltas, unbonded, withdrawn or + /// redelegated bonds. + /// + /// We allow `n - 1` slash rounding error for `n` number of slashes in + /// unique epochs for bonds, unbonds and withdrawals. The bond deltas, + /// unbonds and withdrawals are slashed individually and so their total + /// slashed may be more than the slash on a sum of total validator's + /// stake. + fn slash_round_err_tolerance(&self, epoch: Epoch) -> token::Amount { + let mut unique_count = 0_u64; + for record in self.per_source.values() { + unique_count += record.num_of_slashes(epoch); + } + token::Amount::from(unique_count.checked_sub(1).unwrap_or_default()) + } +} + +#[derive(Clone, Debug, Default)] +struct Records { + /// Key is a bond start epoch (when it first contributed to voting power) + /// The value contains the sum of all the bonds started at the same epoch. + bonds: BTreeMap, + /// Withdrawn tokens in the epoch + withdrawn: BTreeMap, +} + +impl Records { + /// Sum of bond amounts with any slashes that were processed before or at + /// the given epoch applied. 
+ fn amount(&self, epoch: Epoch) -> token::Amount { + let Records { + bonds, + withdrawn: _, + } = self; + let mut total = token::Amount::zero(); + for (&start, bond) in bonds { + if start <= epoch { + // Bonds + total += bond.tokens.amount; + // Add back any slashes that were processed after the given + // epoch + total += bond.tokens.slashes_sum_after_epoch(epoch); + + for (&end, unbond) in &bond.unbonds { + if end >= epoch { + // Unbonds + total += unbond.tokens.amount; + total += unbond.tokens.slashes_sum_after_epoch(epoch); + + // Unbonded incoming redelegations + for redelegs in unbond.incoming_redelegs.values() { + for tokens in redelegs.tokens.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + } + + // Outgoing redelegations + for (&end, redelegs) in &bond.outgoing_redelegs { + if end >= epoch { + for tokens in redelegs.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + + // Incoming redelegations + for redelegs in bond.incoming_redelegs.values() { + for tokens in redelegs.tokens.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + } + total + } + + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + for (&start, bond) in self.bonds.iter_mut() { + if start <= infraction_epoch { + bond.slash(rate, infraction_epoch, processing_epoch); + + for (&end, unbond) in bond.unbonds.iter_mut() { + if end >= infraction_epoch { + unbond.slash(rate, infraction_epoch, processing_epoch); + } + } + } + } + } + + fn subtract_redelegation_slash( + &mut self, + src_validator: &Address, + src_bond_start: Epoch, + redelegation_start: Epoch, + mut to_sub: TokensSlash, + processing_epoch: Epoch, + ) { + // Slash redelegation destination on the next epoch + let slash_epoch = processing_epoch.next(); + let bond = self.bonds.get_mut(&redelegation_start).unwrap(); + for unbond in 
bond.unbonds.values_mut() { + if let Some(redeleg) = + unbond.incoming_redelegs.get_mut(src_validator) + { + if let Some(tokens) = redeleg.tokens.get_mut(&src_bond_start) { + if tokens.amount >= to_sub.amount { + tokens.amount -= to_sub.amount; + *tokens.slashes.entry(slash_epoch).or_default() += + to_sub; + return; + } else { + to_sub.amount -= tokens.amount; + *tokens.slashes.entry(slash_epoch).or_default() += + TokensSlash { + amount: tokens.amount, + rate: to_sub.rate, + }; + tokens.amount = token::Amount::zero(); + } + } + } + } + let redeleg = bond.incoming_redelegs.get_mut(src_validator).unwrap(); + if let Some(tokens) = redeleg.tokens.get_mut(&src_bond_start) { + tokens.amount -= to_sub.amount; + *tokens.slashes.entry(slash_epoch).or_default() += to_sub; + } else { + debug_assert!(to_sub.amount.is_zero()); + } + } + + /// Find how much slash rounding error at most can be tolerated for slashes + /// that were processed before or at the given epoch on a bond's amount vs + /// sum of slashes on bond deltas, unbonded, withdrawn or redelegated + /// bonds. + /// + /// We allow `n - 1` slash rounding error for `n` number of slashes (`fn + /// num_of_slashes`) in unique epochs for bonds, unbonds and + /// withdrawals. The bond deltas, unbonds and withdrawals are slashed + /// individually and so their total slashed may be more than the slash + /// on a sum of a bond's total amount. + fn slash_round_err_tolerance(&self, epoch: Epoch) -> token::Amount { + token::Amount::from( + self.num_of_slashes(epoch) + .checked_sub(1) + .unwrap_or_default(), + ) + } + + /// Get the number of slashes in unique epochs that were processed before or + /// at the given epoch for all bonds, unbonds, redelegs, unbonded redelegs + /// and withdrawn tokens. 
+ fn num_of_slashes(&self, epoch: Epoch) -> u64 { + let mut unique_count = 0_u64; + for bond in self.bonds.values() { + unique_count += bond.tokens.num_of_slashes(epoch); + for redeleg in bond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + unique_count += tokens.num_of_slashes(epoch); + } + } + for unbond in bond.unbonds.values() { + unique_count += unbond.tokens.num_of_slashes(epoch); + for redeleg in unbond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + unique_count += tokens.num_of_slashes(epoch); + } + } + } + } + for withdrawn in self.withdrawn.values() { + unique_count += withdrawn.num_of_slashes(epoch); + } + unique_count + } +} + +#[derive(Clone, Debug, Default)] +struct Bond { + /// Bonded amount is the amount that's been bonded originally, reduced by + /// unbonding or slashing, if any. Incoming redelegations are recorded + /// separately. + tokens: TokensWithSlashes, + /// Incoming redelegations contribute to the stake of this validator. + /// Their sum is not included in the `tokens` field. + incoming_redelegs: BTreeMap, + /// Key is end epoch in which the unbond last contributed to stake of the + /// validator. + unbonds: BTreeMap, + /// The outer key is an end epoch of the redelegated bond in which the bond + /// last contributed to voting power of this validator (the source). The + /// inner key is the redelegation destination validator. + /// + /// After a redelegation a bond transferred to destination validator is + /// liable for slashes on a source validator (key in the map) from the + /// Bond's `start` to key's `end` epoch. 
+ outgoing_redelegs: BTreeMap>, +} + +impl Bond { + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + self.tokens.slash(rate, infraction_epoch, processing_epoch); + for (_src, redeleg) in self.incoming_redelegs.iter_mut() { + for tokens in redeleg.tokens.values_mut() { + tokens.slash(rate, infraction_epoch, processing_epoch); + } + } + } +} + +#[derive(Clone, Debug, Default)] +struct IncomingRedeleg { + /// Total amount with all slashes keyed by redelegation source bond start + tokens: BTreeMap, +} +impl IncomingRedeleg { + /// Get the token amount before any slashes that were processed after the + /// redelegation epoch. + fn amount_before_slashing_after_redeleg( + &self, + redeleg_epoch: Epoch, + ) -> token::Amount { + self.tokens + .values() + .map(|tokens| { + tokens.amount_before_slashing_after_redeleg(redeleg_epoch) + }) + .sum() + } + + // Get the token amount before any slashing. + fn amount_before_slashing(&self) -> token::Amount { + self.tokens + .values() + .map(TokensWithSlashes::amount_before_slashing) + .sum() + } +} + +#[derive(Clone, Debug, Default, PartialEq)] +struct TokensWithSlashes { + /// Token amount after any applicable slashing + amount: token::Amount, + /// Total amount that's been slashed associated with the epoch in which the + /// slash was processed. + slashes: BTreeMap, +} + +#[derive(Clone, Debug, Default, PartialEq)] +struct TokensSlash { + amount: token::Amount, + rate: Dec, +} + +impl AddAssign for TokensSlash { + fn add_assign(&mut self, rhs: Self) { + self.amount += rhs.amount; + // Cap the rate at 1 + self.rate = cmp::min(Dec::one(), self.rate + rhs.rate); + } +} + +impl TokensWithSlashes { + /// Slash on original amount before slashes that were processed after the + /// infraction epoch. Returns the slashed amount. 
+ fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) -> token::Amount { + // Add back slashes to slashable amount that didn't affect this epoch + // (applied after infraction epoch) + let slashable_amount = + self.amount + self.slashes_sum_after_epoch(infraction_epoch); + let amount = cmp::min(slashable_amount.mul_ceil(rate), self.amount); + if !amount.is_zero() { + self.amount -= amount; + let slash = self.slashes.entry(processing_epoch).or_default(); + *slash += TokensSlash { amount, rate }; + } + amount + } + + /// Add the given slashes at their epochs. + fn add_slashes(&mut self, slashes: &BTreeMap) { + for (&epoch, slash) in slashes { + *self.slashes.entry(epoch).or_default() += slash.clone(); + } + } + + /// Subtract the given slash amount in order of the epochs. Returns the + /// removed slashes. + fn subtract_slash( + &mut self, + mut to_slash: token::Amount, + ) -> BTreeMap { + let mut removed = BTreeMap::new(); + self.slashes.retain(|&epoch, slash| { + if to_slash.is_zero() { + return true; + } + if slash.amount > to_slash { + slash.amount -= to_slash; + removed.insert( + epoch, + TokensSlash { + amount: to_slash, + rate: slash.rate, + }, + ); + to_slash = token::Amount::zero(); + true + } else { + to_slash -= slash.amount; + removed.insert(epoch, slash.clone()); + false + } + }); + removed + } + + /// Get the token amount before any slashing. + fn amount_before_slashing(&self) -> token::Amount { + self.amount + self.slashes_sum() + } + + /// Get the token amount before any slashes that were processed after the + /// redelegation epoch. + fn amount_before_slashing_after_redeleg( + &self, + redeleg_epoch: Epoch, + ) -> token::Amount { + let mut amount = self.amount; + for (&processed_epoch, slash) in &self.slashes { + if processed_epoch > redeleg_epoch { + amount += slash.amount; + } + } + amount + } + + /// Get a sum of all slash amounts. 
+ fn slashes_sum(&self) -> token::Amount { + self.slashes + .values() + .map(|TokensSlash { amount, rate: _ }| *amount) + .sum() + } + + /// Get a sum of all slash rates, capped at 1. + fn slash_rates_sum(&self) -> Dec { + cmp::min( + Dec::one(), + self.slashes + .values() + .map(|TokensSlash { amount: _, rate }| *rate) + .sum(), + ) + } + + /// Get a sum of all slashes that were processed after the given epoch. + fn slashes_sum_after_epoch(&self, epoch: Epoch) -> token::Amount { + let mut sum = token::Amount::zero(); + for (&processed_epoch, slash) in &self.slashes { + if processed_epoch > epoch { + sum += slash.amount; + } + } + sum + } + + /// Is the sum of tokens and slashed tokens zero? I.e. Are there no tokens? + fn is_zero(&self) -> bool { + self.amount.is_zero() && self.slashes_sum().is_zero() + } + + /// Get the number of slashes in unique epochs that were processed before or + /// at the given epoch. + fn num_of_slashes(&self, epoch: Epoch) -> u64 { + self.slashes + .keys() + .filter(|&&processed| processed <= epoch) + .count() as u64 + } +} + +#[derive(Clone, Debug, Default)] +struct Unbond { + /// A first epoch from which the unbond is withdrawable. + withdrawable_epoch: Epoch, + /// Bonded amount is the amount that's been bonded originally, reduced by + /// unbonding or slashing, if any. + tokens: TokensWithSlashes, + incoming_redelegs: BTreeMap, +} + +impl Unbond { + /// Get the total unbonded amount before slashing, including any unbonded + /// redelegations. 
+ fn amount_before_slashing(&self) -> token::Amount { + self.tokens.amount_before_slashing() + + self + .incoming_redelegs + .iter() + .fold(token::Amount::zero(), |acc, (_src, redeleg)| { + acc + redeleg.amount_before_slashing() + }) + } + + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + self.tokens.slash(rate, infraction_epoch, processing_epoch); + for (_src, redeleg) in self.incoming_redelegs.iter_mut() { + for tokens in redeleg.tokens.values_mut() { + tokens.slash(rate, infraction_epoch, processing_epoch); + } + } + } +} + +/// The PoS system under test +#[derive(Derivative)] +#[derivative(Debug)] +struct ConcretePosState { + /// Storage - contains all the PoS state + s: TestWlStorage, + /// Last reference state in debug format to print changes after transitions + #[derivative(Debug = "ignore")] + last_state_diff: DbgPrintDiff, +} + +/// State machine transitions +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Derivative)] +#[derivative(Debug)] +enum Transition { + NextEpoch, + InitValidator { + address: Address, + #[derivative(Debug = "ignore")] + consensus_key: PublicKey, + #[derivative(Debug = "ignore")] + eth_cold_key: PublicKey, + #[derivative(Debug = "ignore")] + eth_hot_key: PublicKey, + commission_rate: Dec, + max_commission_rate_change: Dec, + }, + Bond { + id: BondId, + amount: token::Amount, + }, + Unbond { + id: BondId, + amount: token::Amount, + }, + Withdraw { + id: BondId, + }, + Redelegate { + /// A chained redelegation must fail + is_chained: bool, + id: BondId, + new_validator: Address, + amount: token::Amount, + }, + Misbehavior { + address: Address, + slash_type: SlashType, + infraction_epoch: Epoch, + height: u64, + }, + UnjailValidator { + address: Address, + }, +} + +impl StateMachineTest for ConcretePosState { + type Reference = AbstractPosState; + type SystemUnderTest = Self; + + fn init_test( + initial_state: &::State, + ) -> Self::SystemUnderTest { + tracing::debug!("New test 
case"); + tracing::debug!( + "Genesis validators: {:#?}", + initial_state + .genesis_validators + .iter() + .map(|val| &val.address) + .collect::>() + ); + let mut s = TestWlStorage::default(); + crate::init_genesis( + &mut s, + &initial_state.params, + initial_state.genesis_validators.clone().into_iter(), + initial_state.epoch, + ) + .unwrap(); + let last_state_diff = DbgPrintDiff::new().store(initial_state); + Self { s, last_state_diff } + } + + fn apply( + mut state: Self::SystemUnderTest, + ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { + tracing::debug!( + "{} {:#?}", + Paint::green("Transition").underline(), + Paint::yellow(&transition) + ); + + if false { + // NOTE: enable to capture and print ref state diff + let new_diff = + state.last_state_diff.print_diff_and_store(ref_state); + state.last_state_diff = new_diff; + } + + pause_for_enter(); + + let params = crate::read_pos_params(&state.s).unwrap(); + let pos_balance = read_balance( + &state.s, + &state.s.storage.native_token, + &crate::ADDRESS, + ) + .unwrap(); + tracing::debug!("PoS balance: {}", pos_balance.to_string_native()); + match transition { + Transition::NextEpoch => { + tracing::debug!("\nCONCRETE Next epoch"); + super::advance_epoch(&mut state.s, ¶ms); + + // Need to apply some slashing + let current_epoch = state.s.storage.block.epoch; + super::process_slashes(&mut state.s, current_epoch).unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_next_epoch_post_conditions(¶ms); + } + Transition::InitValidator { + address, + consensus_key, + eth_cold_key, + eth_hot_key, + commission_rate, + max_commission_rate_change, + } => { + tracing::debug!("\nCONCRETE Init validator"); + let current_epoch = state.current_epoch(); + + super::become_validator(super::BecomeValidator { + storage: &mut state.s, + params: ¶ms, + address: &address, + consensus_key: &consensus_key, + eth_cold_key: ð_cold_key, + eth_hot_key: ð_hot_key, + current_epoch, + 
commission_rate, + max_commission_rate_change, + }) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_init_validator_post_conditions( + current_epoch, + ¶ms, + &address, + ) + } + Transition::Bond { id, amount } => { + tracing::debug!("\nCONCRETE Bond"); + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + let validator_stake_before_bond_cur = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let validator_stake_before_bond_pipeline = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Credit tokens to ensure we can apply the bond + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + token::credit_tokens( + &mut state.s, + &native_token, + &id.source, + amount, + ) + .unwrap(); + + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // This must be ensured by both transitions generator and + // pre-conditions! 
+ assert!( + crate::is_validator(&state.s, &id.validator).unwrap(), + "{} is not a validator", + id.validator + ); + + // Apply the bond + super::bond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + current_epoch, + ) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_bond_post_conditions( + current_epoch, + ¶ms, + id.clone(), + amount, + validator_stake_before_bond_cur, + validator_stake_before_bond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should increase + assert!(pos_balance_pre < pos_balance_post); + // Post-condition: The difference in PoS balance should be the + // same as in the source + assert_eq!( + pos_balance_post - pos_balance_pre, + src_balance_pre - src_balance_post + ); + } + Transition::Unbond { id, amount } => { + tracing::debug!("\nCONCRETE Unbond"); + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + let validator_stake_before_unbond_cur = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let validator_stake_before_unbond_pipeline = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Apply the unbond + super::unbond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + current_epoch, + false, + ) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_unbond_post_conditions( + current_epoch, + ¶ms, + id.clone(), + amount, + 
validator_stake_before_unbond_cur, + validator_stake_before_unbond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should not change + assert_eq!(pos_balance_pre, pos_balance_post); + // Post-condition: Source balance should not change + assert_eq!(src_balance_post, src_balance_pre); + + // Check that the bonds are the same + // let abs_bonds = ref_state.bonds.get(&id).cloned().unwrap(); + // let conc_bonds = crate::bond_handle(&id.source, + // &id.validator) .get_data_handler() + // .collect_map(&state.s) + // .unwrap(); + // assert_eq!(abs_bonds, conc_bonds); + + // // Check that the unbond records are the same + // // TODO: figure out how we get entries with 0 amount in the + // // abstract version (and prevent) + // let mut abs_unbond_records = ref_state + // .unbond_records + // .get(&id.validator) + // .cloned() + // .unwrap(); + // abs_unbond_records.retain(|_, inner_map| { + // inner_map.retain(|_, value| !value.is_zero()); + // !inner_map.is_empty() + // }); + // let conc_unbond_records = + // crate::total_unbonded_handle(&id.validator) + // .collect_map(&state.s) + // .unwrap(); + // assert_eq!(abs_unbond_records, conc_unbond_records); + } + Transition::Withdraw { + id: BondId { source, validator }, + } => { + tracing::debug!("\nCONCRETE Withdraw"); + let current_epoch = state.current_epoch(); + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + // TODO: add back when slash pool is being used again + // let slash_pool = address::POS_SLASH_POOL; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + // let slash_balance_pre = + // token::read_balance(&state.s, &native_token, &slash_pool) + // .unwrap(); + + // 
Apply the withdrawal + let withdrawn = super::withdraw_tokens( + &mut state.s, + Some(&source), + &validator, + current_epoch, + ) + .unwrap(); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + // let slash_balance_post = + // token::read_balance(&state.s, &native_token, &slash_pool) + // .unwrap(); + + // Post-condition: PoS balance should decrease or not change if + // nothing was withdrawn + assert!(pos_balance_pre >= pos_balance_post); + + // Post-condition: The difference in PoS balance should be equal + // to the sum of the difference in the source and the difference + // in the slash pool + // TODO: needs slash pool + // assert_eq!( + // pos_balance_pre - pos_balance_post, + // src_balance_post - src_balance_pre + slash_balance_post + // - slash_balance_pre + // ); + + // Post-condition: The increment in source balance should be + // equal to the withdrawn amount + assert_eq!(src_balance_post - src_balance_pre, withdrawn); + + // Post-condition: The amount withdrawn must match reference + // state withdrawal + let records = ref_state.records(&validator, &source).unwrap(); + let max_slash_round_err = + records.slash_round_err_tolerance(current_epoch); + let ref_withdrawn = + records.withdrawn.get(¤t_epoch).unwrap().amount; + assert!( + ref_withdrawn <= withdrawn + && withdrawn <= ref_withdrawn + max_slash_round_err, + "Expected to withdraw from validator {validator} owner \ + {source} amount {} ({}), but withdrawn {}.", + ref_withdrawn.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +{}", + max_slash_round_err.to_string_native() + ) + }, + withdrawn.to_string_native(), + ); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!("\nCONCRETE Redelegate"); + + let 
current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + + // Read data prior to applying the transition + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Read validator's redelegations and bonds to find how much of + // them is slashed + let mut amount_after_slash = token::Amount::zero(); + let mut to_redelegate = amount; + + let redelegations_handle = + delegator_redelegated_bonds_handle(&id.source) + .at(&id.validator); + + let bonds: Vec> = + bond_handle(&id.source, &id.validator) + .get_data_handler() + .iter(&state.s) + .unwrap() + .collect(); + 'bonds_loop: for res in bonds.into_iter().rev() { + let (bond_start, bond_delta) = res.unwrap(); + + // Find incoming redelegations at this bond start epoch as a + // redelegation end epoch (the epoch in which it stopped to + // contributing to src) + let redeleg_end = bond_start; + let redeleg_start = + params.redelegation_start_epoch_from_end(redeleg_end); + let redelegations: Vec<_> = redelegations_handle + .at(&redeleg_end) + .iter(&state.s) + .unwrap() + .collect(); + // Iterate incoming redelegations first + for res in redelegations.into_iter().rev() { + let ( + NestedSubKey::Data { + key: src_validator, + nested_sub_key: + SubKey::Data(redeleg_src_bond_start), + }, + delta, + ) = res.unwrap(); + + // Apply slashes on this delta, if any + let mut this_amount_after_slash = delta; + + // Find redelegation source validator's slashes + let slashes = find_slashes_in_range( + &state.s, + redeleg_src_bond_start, + Some(redeleg_end), + &src_validator, + ) + .unwrap(); + for (slash_epoch, rate) in slashes { + // Only apply slashes that weren't processed before + // redelegation as those are applied eagerly + if slash_epoch + + params.slash_processing_epoch_offset() + > redeleg_start + { + let slash = delta.mul_ceil(rate); + this_amount_after_slash = + 
this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + } + // Find redelegation destination validator's slashes + let slashes = find_slashes_in_range( + &state.s, + redeleg_end, + None, + &id.validator, + ) + .unwrap(); + for (_slash_epoch, rate) in slashes { + let slash = delta.mul_ceil(rate); + this_amount_after_slash = this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + + if to_redelegate >= delta { + amount_after_slash += this_amount_after_slash; + to_redelegate -= delta; + } else { + // We have to divide this bond in case there are + // slashes + let slash_ratio = + Dec::from(this_amount_after_slash) + / Dec::from(delta); + amount_after_slash += slash_ratio * to_redelegate; + to_redelegate = token::Amount::zero(); + } + + if to_redelegate.is_zero() { + break 'bonds_loop; + } + } + + // Then if there's still something to redelegate, unbond the + // regular bonds + if !to_redelegate.is_zero() { + // Apply slashes on this bond delta, if any + let mut this_amount_after_slash = bond_delta; + + // Find validator's slashes + let slashes = find_slashes_in_range( + &state.s, + bond_start, + None, + &id.validator, + ) + .unwrap(); + for (_slash_epoch, rate) in slashes { + let slash = bond_delta.mul_ceil(rate); + this_amount_after_slash = this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + + if to_redelegate >= bond_delta { + amount_after_slash += this_amount_after_slash; + to_redelegate -= bond_delta; + } else { + // We have to divide this bond in case there are + // slashes + let slash_ratio = + Dec::from(this_amount_after_slash) + / Dec::from(bond_delta); + amount_after_slash += slash_ratio * to_redelegate; + to_redelegate = token::Amount::zero(); + } + if to_redelegate.is_zero() { + break; + } + } + } + + // Read src validator stakes + let src_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let 
src_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Read dest validator stakes + let dest_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + let dest_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + + // Find delegations + let delegations_pre = + crate::find_delegations(&state.s, &id.source, &pipeline) + .unwrap(); + + // Apply redelegation + let result = redelegate_tokens( + &mut state.s, + &id.source, + &id.validator, + &new_validator, + current_epoch, + amount, + ); + + if !amount.is_zero() && is_chained { + assert!(result.is_err()); + let err = result.unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::IsChainedRedelegation, + "A chained redelegation must be rejected, got \ + {err_str}", + ); + } else { + result.unwrap(); + + // Post-condition: PoS balance is unchanged + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos) + .unwrap(); + assert_eq!(pos_balance_pre, pos_balance_post); + + // Post-condition: Source validator stake at current epoch + // is unchanged + let src_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + src_validator_stake_cur_pre, + src_validator_stake_cur_post + ); + + // Post-condition: Source validator stake at pipeline epoch + // is reduced by the redelegation amount + + // TODO: shouldn't this be reduced by the redelegation + // amount post-slashing tho? 
+ // NOTE: We changed it to reduce it, check again later + let src_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + let max_slash_round_err = ref_state + .validator_records + .get(&id.validator) + .map(|r| r.slash_round_err_tolerance(current_epoch)) + .unwrap_or_default(); + let expected_new_stake = src_validator_stake_pipeline_pre + .checked_sub(amount_after_slash) + .unwrap_or_default(); + assert!( + src_validator_stake_pipeline_post + <= expected_new_stake + max_slash_round_err + && expected_new_stake + <= src_validator_stake_pipeline_post + + max_slash_round_err, + "Expected src validator {} stake after redelegation \ + at pipeline to be equal to {} ({}), got {}.", + id.validator, + expected_new_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + src_validator_stake_pipeline_post.to_string_native() + ); + + // Post-condition: Destination validator stake at current + // epoch is unchanged + let dest_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + dest_validator_stake_cur_pre, + dest_validator_stake_cur_post + ); + + // Post-condition: Destination validator stake at pipeline + // epoch is increased by the redelegation amount, less any + // slashes + let expected_new_stake = + dest_validator_stake_pipeline_pre + amount_after_slash; + let dest_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + assert!( + expected_new_stake + <= dest_validator_stake_pipeline_post + + max_slash_round_err + && dest_validator_stake_pipeline_post + <= expected_new_stake + max_slash_round_err, + "Expected dest validator {} stake after redelegation \ + at pipeline 
to be equal to {} ({}), got {}.", + new_validator, + expected_new_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + dest_validator_stake_pipeline_post.to_string_native() + ); + + // Post-condition: The difference at pipeline in src + // validator stake is equal to negative difference in dest + // validator. + assert_eq!( + src_validator_stake_pipeline_pre + - src_validator_stake_pipeline_post, + dest_validator_stake_pipeline_post + - dest_validator_stake_pipeline_pre + ); + + // Post-condition: The delegator's delegations should be + // updated with redelegation. For the source reduced by the + // redelegation amount and for the destination increased by + // the redelegation amount, less any slashes. + let delegations_post = crate::find_delegations( + &state.s, &id.source, &pipeline, + ) + .unwrap(); + let src_delegation_pre = delegations_pre + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let src_delegation_post = delegations_post + .get(&id.validator) + .cloned() + .unwrap_or_default(); + assert_eq!( + src_delegation_pre - src_delegation_post, + amount + ); + let dest_delegation_pre = delegations_pre + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let dest_delegation_post = delegations_post + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let dest_delegation_diff = + dest_delegation_post - dest_delegation_pre; + assert!( + amount_after_slash + <= dest_delegation_diff + max_slash_round_err + && dest_delegation_diff + <= amount_after_slash + max_slash_round_err, + "Expected redelegation by {} to be increased by to {} \ + ({}), but it increased by {}.", + id.source, + amount_after_slash.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + 
max_slash_round_err.to_string_native() + ) + }, + dest_delegation_diff.to_string_native(), + ); + } + } + Transition::Misbehavior { + address, + slash_type, + infraction_epoch, + height, + } => { + tracing::debug!("\nCONCRETE Misbehavior"); + let current_epoch = state.current_epoch(); + // Record the slash evidence + super::slash( + &mut state.s, + ¶ms, + current_epoch, + infraction_epoch, + height, + slash_type, + &address, + current_epoch.next(), + ) + .unwrap(); + + // Apply some post-conditions + let params = read_pos_params(&state.s).unwrap(); + state.check_misbehavior_post_conditions( + ¶ms, + current_epoch, + infraction_epoch, + slash_type, + &address, + ); + + // TODO: Any others? + } + Transition::UnjailValidator { address } => { + tracing::debug!("\nCONCRETE UnjailValidator"); + let current_epoch = state.current_epoch(); + + // Unjail the validator + super::unjail_validator(&mut state.s, &address, current_epoch) + .unwrap(); + + // Post-conditions + let params = read_pos_params(&state.s).unwrap(); + state.check_unjail_validator_post_conditions(¶ms, &address); + } + } + state + } + + fn check_invariants( + state: &Self::SystemUnderTest, + ref_state: &::State, + ) { + let current_epoch = state.current_epoch(); + let params = read_pos_params(&state.s).unwrap(); + state.check_global_post_conditions(¶ms, current_epoch, ref_state); + } +} + +impl ConcretePosState { + fn current_epoch(&self) -> Epoch { + self.s.storage.block.epoch + } + + fn check_next_epoch_post_conditions(&self, params: &PosParams) { + let pipeline = self.current_epoch() + params.pipeline_len; + let before_pipeline = pipeline.prev(); + + // Post-condition: Consensus validator sets at pipeline offset + // must be the same as at the epoch before it. 
+ let consensus_set_before_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let consensus_set_at_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + consensus_set_before_pipeline.into_iter().sorted(), + consensus_set_at_pipeline.into_iter().sorted(), + ); + + // Post-condition: Below-capacity validator sets at pipeline + // offset must be the same as at the epoch before it. + let below_cap_before_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let below_cap_at_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + below_cap_before_pipeline.into_iter().sorted(), + below_cap_at_pipeline.into_iter().sorted(), + ); + + // TODO: post-conditions for processing of slashes, just throwing things + // here atm + let slashed_validators = enqueued_slashes_handle() + .at(&self.current_epoch()) + .iter(&self.s) + .unwrap() + .map(|a| { + let ( + NestedSubKey::Data { + key: address, + nested_sub_key: _, + }, + _b, + ) = a.unwrap(); + address + }) + .collect::>(); + + for validator in &slashed_validators { + assert!( + !validator_slashes_handle(validator) + .is_empty(&self.s) + .unwrap() + ); + assert_eq!( + validator_state_handle(validator) + .get(&self.s, self.current_epoch(), params) + .unwrap(), + Some(ValidatorState::Jailed) + ); + } + } + + fn check_bond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_bond_cur: token::Amount, + validator_stake_before_bond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap(); + + // 
Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_bond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap(); + + // Post-condition: the validator stake at the pipeline should be + // incremented by the bond amount + assert_eq!( + stake_at_pipeline, + validator_stake_before_bond_pipeline + amount + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + fn check_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_unbond_cur: token::Amount, + validator_stake_before_unbond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap(); + + // Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_unbond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap(); + + // Post-condition: the validator stake at the pipeline should be + // decremented at most by the bond amount (because slashing can reduce + // the actual amount unbonded) + // + // TODO: is this a weak assertion here? 
Seems cumbersome to calculate + // the exact amount considering the slashing applied can be complicated + assert!( + stake_at_pipeline + >= validator_stake_before_unbond_pipeline + .checked_sub(amount) + .unwrap_or_default() + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + /// These post-conditions apply to bonding and unbonding + fn check_bond_and_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + stake_at_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + // Read the consensus sets data using iterator + let num_in_consensus = crate::consensus_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == &id.validator) + .count(); + + let num_in_below_cap = crate::below_capacity_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == &id.validator) + .count(); + + let num_in_below_thresh = + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap() + .into_iter() + .filter(|addr| addr == &id.validator) + .count(); + + let num_occurrences = + num_in_consensus + num_in_below_cap + num_in_below_thresh; + let validator_is_jailed = crate::validator_state_handle(&id.validator) + .get(&self.s, pipeline, params) + .unwrap() + == Some(ValidatorState::Jailed); + + // Post-condition: There must only be one instance of this validator in + // the consensus + below-cap sets with some stake across all + // validator sets, OR there are no instances and this validator is + // jailed + assert!( + num_occurrences == 1 + || (num_occurrences == 0 && validator_is_jailed) + ); + + let consensus_set = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + let below_cap_set = + crate::read_below_capacity_validator_set_addresses_with_stake( + 
&self.s, pipeline, + ) + .unwrap(); + let below_thresh_set = + crate::read_below_threshold_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap(); + let weighted = WeightedValidator { + bonded_stake: stake_at_pipeline, + address: id.validator, + }; + let consensus_val = consensus_set.get(&weighted); + let below_cap_val = below_cap_set.get(&weighted); + let below_thresh_val = below_thresh_set.get(&weighted.address); + + // Post-condition: The validator should be updated in exactly once in + // the validator sets + let jailed_condition = validator_is_jailed + && consensus_val.is_none() + && below_cap_val.is_none() + && below_thresh_val.is_none(); + + let mut num_sets = i32::from(consensus_val.is_some()); + num_sets += i32::from(below_cap_val.is_some()); + num_sets += i32::from(below_thresh_val.is_some()); + + assert!(num_sets == 1 || jailed_condition); + + // Post-condition: The stake of the validators in the consensus set is + // greater than or equal to below-capacity validators + for WeightedValidator { + bonded_stake: consensus_stake, + address: consensus_addr, + } in consensus_set.iter() + { + for WeightedValidator { + bonded_stake: below_cap_stake, + address: below_cap_addr, + } in below_cap_set.iter() + { + assert!( + consensus_stake >= below_cap_stake, + "Consensus validator {consensus_addr} with stake {} and \ + below-capacity {below_cap_addr} with stake {} should be \ + swapped.", + consensus_stake.to_string_native(), + below_cap_stake.to_string_native() + ); + } + } + } + + fn check_init_validator_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + address: &Address, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + // Post-condition: the validator should not be in the validator set + // until the pipeline epoch + for epoch in submit_epoch.iter_range(params.pipeline_len) { + assert!( + !crate::read_consensus_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) + ); + assert!( + 
!crate::read_below_capacity_validator_set_addresses( + &self.s, epoch + ) + .unwrap() + .contains(address) + ); + assert!( + !crate::read_below_threshold_validator_set_addresses( + &self.s, epoch + ) + .unwrap() + .contains(address) + ); + assert!( + !crate::read_all_validator_addresses(&self.s, epoch) + .unwrap() + .contains(address) + ); + } + let in_consensus = + crate::read_consensus_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); + let in_bc = crate::read_below_capacity_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap() + .contains(address); + let in_below_thresh = + crate::read_below_threshold_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap() + .contains(address); + + assert!(in_below_thresh && !in_consensus && !in_bc); + } + + fn check_misbehavior_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + infraction_epoch: Epoch, + slash_type: SlashType, + validator: &Address, + ) { + tracing::debug!( + "\nChecking misbehavior post conditions for validator: \n{}", + validator + ); + + // Validator state jailed and validator removed from the consensus set + // starting at the next epoch + for offset in 1..=params.pipeline_len { + // dbg!( + // crate::read_consensus_validator_set_addresses_with_stake( + // &self.s, + // current_epoch + offset + // ) + // .unwrap() + // ); + assert_eq!( + validator_state_handle(validator) + .get(&self.s, current_epoch + offset, params) + .unwrap(), + Some(ValidatorState::Jailed) + ); + let in_consensus = consensus_validator_set_handle() + .at(&(current_epoch + offset)) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + // dbg!(&val_address); + val_address == validator.clone() + }); + assert!(!in_consensus); + } + + // `enqueued_slashes` contains the slash element just added + let processing_epoch = infraction_epoch + + params.unbonding_len + + 1_u64 + + params.cubic_slashing_window_length; + let slash = enqueued_slashes_handle() 
+ .at(&processing_epoch) + .at(validator) + .back(&self.s) + .unwrap(); + if let Some(slash) = slash { + assert_eq!(slash.epoch, infraction_epoch); + assert_eq!(slash.r#type, slash_type); + assert_eq!(slash.rate, Dec::zero()); + } else { + panic!("Could not find the slash enqueued"); + } + + // TODO: Any others? + } + + fn check_unjail_validator_post_conditions( + &self, + params: &PosParams, + validator: &Address, + ) { + let current_epoch = self.s.storage.block.epoch; + + // Make sure the validator is not in either set until the pipeline epoch + for epoch in current_epoch.iter_range(params.pipeline_len) { + let in_consensus = consensus_validator_set_handle() + .at(&epoch) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + val_address == validator.clone() + }); + + let in_bc = below_capacity_validator_set_handle() + .at(&epoch) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + val_address == validator.clone() + }); + assert!(!in_consensus && !in_bc); + + let val_state = validator_state_handle(validator) + .get(&self.s, epoch, params) + .unwrap(); + assert_eq!(val_state, Some(ValidatorState::Jailed)); + } + let pipeline_epoch = current_epoch + params.pipeline_len; + + let num_in_consensus = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == validator) + .count(); + + let num_in_bc = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == validator) + .count(); + + let num_in_bt = read_below_threshold_validator_set_addresses( + &self.s, + pipeline_epoch, + ) + .unwrap() + .into_iter() + .filter(|addr| addr == validator) + .count(); + + let num_occurrences = num_in_consensus + num_in_bc + num_in_bt; + assert_eq!(num_occurrences, 1); + + let val_state = validator_state_handle(validator) + .get(&self.s, 
current_epoch + params.pipeline_len, params) + .unwrap(); + assert!( + val_state == Some(ValidatorState::Consensus) + || val_state == Some(ValidatorState::BelowCapacity) + || val_state == Some(ValidatorState::BelowThreshold) + ); + } + + fn check_global_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + ref_state: &AbstractPosState, + ) { + // Ensure that every validator in each set has the proper state + for epoch in Epoch::iter_bounds_inclusive( + current_epoch, + current_epoch + params.pipeline_len, + ) { + tracing::debug!("Epoch {epoch}"); + let mut vals = HashSet::
::new(); + for WeightedValidator { + bonded_stake, + address: validator, + } in crate::read_consensus_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let max_slash_round_err = ref_state + .validator_records + .get(&validator) + .unwrap() + .slash_round_err_tolerance(epoch); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let conc_stake = bonded_stake.change(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Consensus val {}, set stake: {}, deltas: {}, ref: {}, \ + {max_err_msg}", + &validator, + conc_stake.to_string_native(), + deltas_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert!(!deltas_stake.is_negative()); + assert_eq!(conc_stake, deltas_stake); + assert!( + ref_stake <= conc_stake + max_slash_round_err.change() + && conc_stake + <= ref_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + conc_stake.to_string_native() + ); + + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + + assert_eq!(state, Some(ValidatorState::Consensus)); + assert_eq!( + state.unwrap(), + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + for WeightedValidator { + bonded_stake, + address: validator, + } in + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let 
max_slash_round_err = ref_state + .validator_records + .get(&validator) + .unwrap() + .slash_round_err_tolerance(epoch); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let conc_stake = bonded_stake.change(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Below-cap val {}, set stake: {}, deltas: {}, ref: {}, \ + {max_err_msg}", + &validator, + conc_stake.to_string_native(), + deltas_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert_eq!(conc_stake, deltas_stake); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + bonded_stake.to_string_native() + ); + + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + // if state.is_none() { + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch, params) + // .unwrap() + // ); + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch.next(), params) + // .unwrap() + // ); + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch.next(), params) + // .unwrap() + // ); + // } + assert_eq!(state, Some(ValidatorState::BelowCapacity)); + assert_eq!( + state.unwrap(), + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + + for validator in + crate::read_below_threshold_validator_set_addresses( + &self.s, epoch, + ) + .unwrap() + { + let conc_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + + let state = 
crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap() + .unwrap(); + + assert_eq!(state, ValidatorState::BelowThreshold); + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + let max_slash_round_err = ref_state + .validator_records + .get(&validator) + .map(|r| r.slash_round_err_tolerance(epoch)) + .unwrap_or_default(); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Below-thresh val {}, deltas: {}, ref: {}, {max_err_msg})", + &validator, + conc_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + conc_stake.to_string_native() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + + // Jailed validators not in a set + let all_validators = + crate::read_all_validator_addresses(&self.s, epoch).unwrap(); + + for val in all_validators { + let state = validator_state_handle(&val) + .get(&self.s, epoch, params) + .unwrap() + .unwrap(); + + if state == ValidatorState::Jailed { + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap() + ); + let conc_stake = validator_deltas_handle(&val) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let max_slash_round_err = ref_state + .validator_records + .get(&val) + .map(|r| r.slash_round_err_tolerance(epoch)) + .unwrap_or_default(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + 
format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap(); + tracing::debug!( + "Jailed val {}, deltas: {}, ref: {}, {max_err_msg}", + &val, + conc_stake.to_string_native(), + ref_stake.to_string_native(), + ); + + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap() + ); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({}), got {}.", + ref_stake.to_string_native(), + max_err_msg, + conc_stake.to_string_native() + ); + assert!(!vals.contains(&val)); + } + } + } + + // Check that validator stakes are matching ref_state + for (validator, records) in &ref_state.validator_records { + // On every epoch from current up to pipeline + for epoch in current_epoch.iter_range(params.pipeline_len) { + let ref_stake = records.stake(epoch); + let conc_stake = crate::read_validator_stake( + &self.s, params, validator, epoch, + ) + .unwrap(); + let max_slash_round_err = + records.slash_round_err_tolerance(epoch); + assert!( + ref_stake <= conc_stake + max_slash_round_err + && conc_stake <= ref_stake + max_slash_round_err, + "Stake for validator {validator} in epoch {epoch} is not \ + matched against reference stake. 
Expected {} ({}), got \ + {}.", + ref_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + conc_stake.to_string_native() + ); + } + } + // TODO: expand above to include jailed validators + + for (validator, records) in &ref_state.validator_records { + for (source, records) in &records.per_source { + let bond_id = BondId { + source: source.clone(), + validator: validator.clone(), + }; + for epoch in current_epoch.iter_range(params.pipeline_len) { + let max_slash_round_err = + records.slash_round_err_tolerance(epoch); + let conc_bond_amount = + crate::bond_amount(&self.s, &bond_id, epoch).unwrap(); + let ref_bond_amount = records.amount(epoch); + assert!( + ref_bond_amount + <= conc_bond_amount + max_slash_round_err + && conc_bond_amount + <= ref_bond_amount + max_slash_round_err, + "Slashed `bond_amount` for validator {validator} in \ + epoch {epoch} is not matched against reference \ + state. 
Expected {} ({}), got {}.", + ref_bond_amount.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + conc_bond_amount.to_string_native() + ); + } + } + } + } +} + +impl ReferenceStateMachine for AbstractPosState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy { + tracing::debug!("\nInitializing abstract state machine"); + arb_params_and_genesis_validators(Some(8), 8..10) + .prop_map(|(params, genesis_validators)| { + let epoch = Epoch::default(); + let mut state = Self { + epoch, + params, + genesis_validators: genesis_validators + .into_iter() + // Sorted by stake to fill in the consensus set first + .sorted_by(|a, b| Ord::cmp(&a.tokens, &b.tokens)) + .rev() + .collect(), + validator_records: Default::default(), + validator_stakes: Default::default(), + consensus_set: Default::default(), + below_capacity_set: Default::default(), + below_threshold_set: Default::default(), + validator_states: Default::default(), + validator_slashes: Default::default(), + enqueued_slashes: Default::default(), + validator_last_slash_epochs: Default::default(), + }; + + for GenesisValidator { + address, + tokens, + consensus_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } in state.genesis_validators.clone() + { + let records = state.records_mut(&address, &address); + let bond_at_start = records.bonds.entry(epoch).or_default(); + bond_at_start.tokens.amount = tokens; + + let total_stakes = + state.validator_stakes.entry(epoch).or_default(); + total_stakes + .insert(address.clone(), token::Change::from(tokens)); + + let consensus_set = + state.consensus_set.entry(epoch).or_default(); + let consensus_vals_len = consensus_set + .iter() + .map(|(_stake, validators)| validators.len() as u64) + .sum(); + + if tokens < 
state.params.validator_stake_threshold { + state + .below_threshold_set + .entry(epoch) + .or_default() + .insert(address.clone()); + state + .validator_states + .entry(epoch) + .or_default() + .insert(address, ValidatorState::BelowThreshold); + } else if state.params.max_validator_slots + > consensus_vals_len + { + state + .validator_states + .entry(epoch) + .or_default() + .insert(address.clone(), ValidatorState::Consensus); + consensus_set + .entry(tokens) + .or_default() + .push_back(address); + } else { + state + .validator_states + .entry(epoch) + .or_default() + .insert( + address.clone(), + ValidatorState::BelowCapacity, + ); + let below_cap_set = + state.below_capacity_set.entry(epoch).or_default(); + below_cap_set + .entry(ReverseOrdTokenAmount(tokens)) + .or_default() + .push_back(address) + }; + } + // Ensure that below-capacity and below-threshold sets are + // initialized even if empty + state.below_capacity_set.entry(epoch).or_default(); + state.below_threshold_set.entry(epoch).or_default(); + + // Copy validator sets up to pipeline epoch + for epoch in epoch.next().iter_range(state.params.pipeline_len) + { + state.copy_discrete_epoched_data(epoch) + } + state + }) + .boxed() + } + + // TODO: allow bonding to jailed val + fn transitions(state: &Self::State) -> BoxedStrategy { + // Let preconditions filter out what unbonds are not allowed + let unbondable = + state.unbondable_bonds().into_iter().collect::>(); + let redelegatable = + state.redelegatable_bonds().into_iter().collect::>(); + + let withdrawable = + state.withdrawable_unbonds().into_iter().collect::>(); + + let eligible_for_unjail = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .iter() + .filter_map(|(addr, &val_state)| { + let last_slash_epoch = + state.validator_last_slash_epochs.get(addr); + + if let Some(last_slash_epoch) = last_slash_epoch { + if val_state == ValidatorState::Jailed + // `last_slash_epoch` must be unbonding_len + window_width or more epochs + // 
before the current + && state.epoch.0 - last_slash_epoch.0 + > state.params.unbonding_len + state.params.cubic_slashing_window_length + { + return Some(addr.clone()); + } + } + None + }) + .collect::>(); + + // Transitions that can be applied if there are no bonds and unbonds + let basic = prop_oneof![ + 4 => Just(Transition::NextEpoch), + 6 => add_arb_bond_amount(state), + 5 => arb_delegation(state), + 3 => arb_self_bond(state), + 1 => ( + address::testing::arb_established_address(), + key::testing::arb_common_keypair(), + key::testing::arb_common_secp256k1_keypair(), + key::testing::arb_common_secp256k1_keypair(), + arb_rate(), + arb_rate(), + ) + .prop_map( + |( + addr, + consensus_key, + eth_hot_key, + eth_cold_key, + commission_rate, + max_commission_rate_change, + )| { + Transition::InitValidator { + address: Address::Established(addr), + consensus_key: consensus_key.to_public(), + eth_hot_key: eth_hot_key.to_public(), + eth_cold_key: eth_cold_key.to_public(), + commission_rate, + max_commission_rate_change, + } + }, + ), + 1 => arb_slash(state), + ]; + + // Add unjailing, if any eligible + let transitions = if eligible_for_unjail.is_empty() { + basic.boxed() + } else { + prop_oneof![ + // basic 6x more likely as it's got 6 cases + 6 => basic, + 1 => prop::sample::select(eligible_for_unjail).prop_map(|address| { + Transition::UnjailValidator { address } + }) + ] + .boxed() + }; + + // Add unbonds, if any + let transitions = if unbondable.is_empty() { + transitions + } else { + let arb_unbondable = prop::sample::select(unbondable); + let arb_unbond = + arb_unbondable.prop_flat_map(move |(id, bonds_sum)| { + let bonds_sum: i128 = + TryFrom::try_from(bonds_sum.change()).unwrap(); + (0..bonds_sum).prop_map(move |to_unbond| { + let id = id.clone(); + let amount = + token::Amount::from_change(Change::from(to_unbond)); + Transition::Unbond { id, amount } + }) + }); + prop_oneof![ + 7 => transitions, + 1 => arb_unbond, + ] + .boxed() + }; + + // Add withdrawals, if 
any + let transitions = if withdrawable.is_empty() { + transitions + } else { + let arb_withdrawable = prop::sample::select(withdrawable); + let arb_withdrawal = arb_withdrawable + .prop_map(|(id, _)| Transition::Withdraw { id }); + + prop_oneof![ + 8 => transitions, + 1 => arb_withdrawal, + ] + .boxed() + }; + + // Add redelegations, if any + if redelegatable.is_empty() { + transitions + } else { + let arb_redelegatable = prop::sample::select(redelegatable); + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let unchainable_redelegations = state.unchainable_redelegations(); + let arb_redelegation = + arb_redelegatable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); + // Generate an amount to redelegate, up to the sum + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); + let arb_amount = (0..deltas_sum).prop_map(|to_unbond| { + token::Amount::from_change(Change::from(to_unbond)) + }); + // Generate a new validator for redelegation + let current_validator = id.validator.clone(); + let new_validators = validators + .iter() + // The validator must be other than the current + .filter(|validator| *validator != ¤t_validator) + .cloned() + .collect::>(); + let arb_new_validator = + prop::sample::select(new_validators); + let unchainable_redelegations = + unchainable_redelegations.clone(); + (arb_amount, arb_new_validator).prop_map( + move |(amount, new_validator)| Transition::Redelegate { + is_chained: Self::is_chained_redelegation( + &unchainable_redelegations, + &id.source, + &id.validator, + ), + id: id.clone(), + new_validator, + amount, + }, + ) + }); + prop_oneof![ + 9 => transitions, + // Cranked up to make redelegations more common + 15 => arb_redelegation, + ] + .boxed() + } + } + + fn apply( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + 
Transition::NextEpoch => { + state.epoch = state.epoch.next(); + tracing::debug!("Starting epoch {}", state.epoch); + + // Copy the non-delta data into pipeline epoch from its pred. + state.copy_discrete_epoched_data(state.pipeline()); + + // Process slashes enqueued for the new epoch + state.process_enqueued_slashes(); + + // print-out the state + state.debug_validators(); + } + Transition::InitValidator { + address, + consensus_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + let pipeline: Epoch = state.pipeline(); + + // Initialize the stake at pipeline + state + .validator_stakes + .entry(pipeline) + .or_default() + .insert(address.clone(), 0_i128.into()); + + // Insert into the below-threshold set at pipeline since the + // initial stake is 0 + state + .below_threshold_set + .entry(pipeline) + .or_default() + .insert(address.clone()); + state + .validator_states + .entry(pipeline) + .or_default() + .insert(address.clone(), ValidatorState::BelowThreshold); + + state.debug_validators(); + } + Transition::Bond { id, amount } => { + if !amount.is_zero() { + state.bond(id, *amount); + state.debug_validators(); + } + } + Transition::Unbond { id, amount } => { + if !amount.is_zero() { + state.unbond(id, *amount); + state.debug_validators(); + } + } + Transition::Withdraw { id } => { + state.withdraw(id); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + if *is_chained { + return state; + } + if !amount.is_zero() { + state.redelegate(id, new_validator, *amount); + state.debug_validators(); + } + } + Transition::Misbehavior { + address, + slash_type, + infraction_epoch, + height, + } => { + let current_epoch = state.epoch; + let processing_epoch = *infraction_epoch + + state.params.unbonding_len + + 1_u64 + + state.params.cubic_slashing_window_length; + let slash = Slash { + epoch: *infraction_epoch, + block_height: *height, + r#type: *slash_type, + rate: Dec::zero(), + 
}; + + // Enqueue the slash for future processing + state + .enqueued_slashes + .entry(processing_epoch) + .or_default() + .entry(address.clone()) + .or_default() + .push(slash); + + // Remove the validator from either the consensus or + // below-capacity set and place it into the jailed validator set + + // Remove from the validator set starting at the next epoch and + // up thru the pipeline + for offset in 1..=state.params.pipeline_len { + let real_stake = token::Amount::from_change( + state + .validator_stakes + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(), + ); + + if let Some((index, stake)) = state + .is_in_consensus_w_info(address, current_epoch + offset) + { + debug_assert_eq!(stake, real_stake); + + let vals = state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .entry(stake) + .or_default(); + let removed = vals.remove(index); + debug_assert_eq!(removed, Some(address.clone())); + if vals.is_empty() { + state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .remove(&stake); + } + + // At pipeline epoch, if was consensus, replace it with + // a below-capacity validator + if offset == state.params.pipeline_len { + let below_cap_pipeline = state + .below_capacity_set + .entry(current_epoch + offset) + .or_default(); + + if let Some(mut max_below_cap) = + below_cap_pipeline.last_entry() + { + let max_bc_stake = *max_below_cap.key(); + let vals = max_below_cap.get_mut(); + let first_val = vals.pop_front().unwrap(); + if vals.is_empty() { + below_cap_pipeline.remove(&max_bc_stake); + } + state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .entry(max_bc_stake.into()) + .or_default() + .push_back(first_val.clone()); + state + .validator_states + .entry(current_epoch + offset) + .or_default() + .insert( + first_val.clone(), + ValidatorState::Consensus, + ); + } + } + } else if let Some((index, stake)) = state + .is_in_below_capacity_w_info( + address, + 
current_epoch + offset, + ) + { + debug_assert_eq!(stake, real_stake); + + let vals = state + .below_capacity_set + .entry(current_epoch + offset) + .or_default() + .entry(stake.into()) + .or_default(); + + let removed = vals.remove(index); + debug_assert_eq!(removed, Some(address.clone())); + if vals.is_empty() { + state + .below_capacity_set + .entry(current_epoch + offset) + .or_default() + .remove(&stake.into()); + } + } else if state + .is_in_below_threshold(address, current_epoch + offset) + { + let removed = state + .below_threshold_set + .entry(current_epoch + offset) + .or_default() + .remove(address); + debug_assert!(removed); + } else { + // Just make sure the validator is already jailed + debug_assert_eq!( + state + .validator_states + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap(), + ValidatorState::Jailed + ); + } + + state + .validator_states + .entry(current_epoch + offset) + .or_default() + .insert(address.clone(), ValidatorState::Jailed); + } + + // Update the most recent infraction epoch for the validator + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(address) + { + if infraction_epoch > last_epoch { + state + .validator_last_slash_epochs + .insert(address.clone(), *infraction_epoch); + } + } else { + state + .validator_last_slash_epochs + .insert(address.clone(), *infraction_epoch); + } + + state.debug_validators(); + } + Transition::UnjailValidator { address } => { + let pipeline_epoch = state.pipeline(); + let consensus_set_pipeline = + state.consensus_set.entry(pipeline_epoch).or_default(); + let pipeline_stake = state + .validator_stakes + .get(&pipeline_epoch) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(); + let validator_states_pipeline = + state.validator_states.entry(pipeline_epoch).or_default(); + + // Insert the validator back into the appropriate validator set + // and update its state + let num_consensus = consensus_set_pipeline + .iter() + .fold(0, |sum, (_, 
validators)| { + sum + validators.len() as u64 + }); + + if pipeline_stake + < state.params.validator_stake_threshold.change() + { + // Place into the below-threshold set + let below_threshold_set_pipeline = state + .below_threshold_set + .entry(pipeline_epoch) + .or_default(); + below_threshold_set_pipeline.insert(address.clone()); + validator_states_pipeline.insert( + address.clone(), + ValidatorState::BelowThreshold, + ); + } else if num_consensus < state.params.max_validator_slots { + // Place directly into the consensus set + debug_assert!( + state + .below_capacity_set + .get(&pipeline_epoch) + .unwrap() + .is_empty() + ); + consensus_set_pipeline + .entry(token::Amount::from_change(pipeline_stake)) + .or_default() + .push_back(address.clone()); + validator_states_pipeline + .insert(address.clone(), ValidatorState::Consensus); + } else if let Some(mut min_consensus) = + consensus_set_pipeline.first_entry() + { + let below_capacity_set_pipeline = state + .below_capacity_set + .entry(pipeline_epoch) + .or_default(); + + let min_consensus_stake = *min_consensus.key(); + if pipeline_stake > min_consensus_stake.change() { + // Place into the consensus set and demote the last + // min_consensus validator + let min_validators = min_consensus.get_mut(); + let last_val = min_validators.pop_back().unwrap(); + // Remove the key if there's nothing left + if min_validators.is_empty() { + consensus_set_pipeline.remove(&min_consensus_stake); + } + // Do the swap + below_capacity_set_pipeline + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + validator_states_pipeline + .insert(last_val, ValidatorState::BelowCapacity); + + consensus_set_pipeline + .entry(token::Amount::from_change(pipeline_stake)) + .or_default() + .push_back(address.clone()); + validator_states_pipeline + .insert(address.clone(), ValidatorState::Consensus); + } else { + // Just place into the below-capacity set + below_capacity_set_pipeline + .entry( + 
token::Amount::from_change(pipeline_stake) + .into(), + ) + .or_default() + .push_back(address.clone()); + validator_states_pipeline.insert( + address.clone(), + ValidatorState::BelowCapacity, + ); + } + } else { + panic!("Should not reach here I don't think") + } + state.debug_validators(); + } + } + + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + match transition { + // TODO: should there be any slashing preconditions for `NextEpoch`? + Transition::NextEpoch => true, + Transition::InitValidator { + address, + consensus_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + let pipeline = state.pipeline(); + // The address must not belong to an existing validator + !state.is_validator(address, pipeline) && + // There must be no delegations from this address + !state.unbondable_bonds().into_iter().any(|(id, _sum)| + &id.source == address) + } + Transition::Bond { id, amount: _ } => { + let pipeline = state.pipeline(); + // The validator must be known + if !state.is_validator(&id.validator, pipeline) { + return false; + } + + id.validator == id.source + // If it's not a self-bond, the source must not be a validator + || !state.is_validator(&id.source, pipeline) + } + Transition::Unbond { id, amount } => { + let pipeline = state.pipeline(); + + let is_unbondable = state + .unbondable_bonds() + .get(id) + .map(|sum| sum >= amount) + .unwrap_or_default(); + + // The validator must not be frozen currently + let is_frozen = if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + } else { + false + }; + + // if is_frozen { + // tracing::debug!( + // "\nVALIDATOR {} IS FROZEN - CANNOT UNBOND\n", + // &id.validator + // ); + // } + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The 
amount must be available to unbond and the validator not jailed + && is_unbondable && !is_frozen + } + Transition::Withdraw { id } => { + let pipeline = state.pipeline(); + + let is_withdrawable = state + .withdrawable_unbonds() + .get(id) + .map(|amount| *amount > token::Amount::zero()) + .unwrap_or_default(); + + // The validator must not be jailed currently + let is_jailed = state + .validator_states + .get(&state.epoch) + .unwrap() + .get(&id.validator) + .cloned() + == Some(ValidatorState::Jailed); + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The amount must be available to unbond + && is_withdrawable && !is_jailed + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + let pipeline = state.pipeline(); + + if *is_chained { + Self::is_chained_redelegation( + &state.unchainable_redelegations(), + &id.source, + new_validator, + ) + } else { + // The src and dest validator must be known + if !state.is_validator(&id.validator, pipeline) + || !state.is_validator(new_validator, pipeline) + { + return false; + } + + // The amount must be available to redelegate + if !state + .unbondable_bonds() + .get(id) + .map(|sum| sum >= amount) + .unwrap_or_default() + { + return false; + } + + // The src validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + // The dest validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(new_validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + true + } + } + Transition::Misbehavior { + address, + slash_type: _, + infraction_epoch, + height: _, + } => { + let is_validator = + state.is_validator(address, 
*infraction_epoch); + + // The infraction epoch cannot be in the future or more than + // unbonding_len epochs in the past + let current_epoch = state.epoch; + let valid_epoch = *infraction_epoch <= current_epoch + && current_epoch.0 - infraction_epoch.0 + <= state.params.unbonding_len; + + // Only misbehave when there is more than 3 validators that's + // not jailed, so there's always at least one honest left + let enough_honest_validators = || { + let num_of_honest = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .iter() + .filter(|(_addr, val_state)| match val_state { + ValidatorState::Consensus + | ValidatorState::BelowCapacity => true, + ValidatorState::Inactive + | ValidatorState::Jailed + // Below threshold cannot be in consensus + | ValidatorState::BelowThreshold => false, + }) + .count(); + + // Find the number of enqueued slashes to unique validators + let num_of_enquequed_slashes = state + .enqueued_slashes + .iter() + // find all validators with any enqueued slashes + .fold(BTreeSet::new(), |mut acc, (&epoch, slashes)| { + if epoch > current_epoch { + acc.extend(slashes.keys().cloned()); + } + acc + }) + .len(); + + num_of_honest - num_of_enquequed_slashes > 3 + }; + + // Ensure that the validator is in consensus when it misbehaves + // TODO: possibly also test allowing below-capacity validators + // tracing::debug!("\nVal to possibly misbehave: {}", &address); + let state_at_infraction = state + .validator_states + .get(infraction_epoch) + .unwrap() + .get(address); + if state_at_infraction.is_none() { + // Figure out why this happening + tracing::debug!( + "State is None at Infraction epoch {}", + infraction_epoch + ); + for epoch in Epoch::iter_bounds_inclusive( + infraction_epoch.next(), + state.epoch, + ) { + let state_ep = state + .validator_states + .get(infraction_epoch) + .unwrap() + .get(address) + .cloned(); + tracing::debug!( + "State at epoch {} is {:?}", + epoch, + state_ep + ); + } + } + + let can_misbehave = 
state_at_infraction.cloned() + == Some(ValidatorState::Consensus); + + is_validator + && valid_epoch + && enough_honest_validators() + && can_misbehave + + // TODO: any others conditions? + } + Transition::UnjailValidator { address } => { + // Validator address must be jailed thru the pipeline epoch + for epoch in + Epoch::iter_bounds_inclusive(state.epoch, state.pipeline()) + { + if state + .validator_states + .get(&epoch) + .unwrap() + .get(address) + .cloned() + .unwrap() + != ValidatorState::Jailed + { + return false; + } + } + // Most recent misbehavior is >= unbonding_len epochs away from + // current epoch + if let Some(last_slash_epoch) = + state.validator_last_slash_epochs.get(address) + { + if state.epoch.0 - last_slash_epoch.0 + < state.params.unbonding_len + { + return false; + } + } + + true + // TODO: any others? + } + } + } +} + +/// Arbitrary bond transition that adds tokens to an existing bond +fn add_arb_bond_amount( + state: &AbstractPosState, +) -> impl Strategy { + let bond_ids = state.existing_bond_ids(); + let arb_bond_id = prop::sample::select(bond_ids); + (arb_bond_id, arb_bond_amount()) + .prop_map(|(id, amount)| Transition::Bond { id, amount }) +} + +/// Arbitrary delegation to one of the validators +fn arb_delegation( + state: &AbstractPosState, +) -> impl Strategy { + // Bond is allowed to any validator in any set - including jailed validators + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let validator_vec = validators.clone().into_iter().collect::>(); + let arb_source = address::testing::arb_non_internal_address() + .prop_filter("Must be a non-validator address", move |addr| { + !validators.contains(addr) + }); + let arb_validator = prop::sample::select(validator_vec); + (arb_source, arb_validator, arb_bond_amount()).prop_map( + |(source, validator, amount)| Transition::Bond { + id: BondId { source, validator }, + amount, + }, + ) +} + +/// Arbitrary 
validator self-bond +fn arb_self_bond( + state: &AbstractPosState, +) -> impl Strategy { + // Bond is allowed to any validator in any set - including jailed validators + let validator_vec = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let arb_validator = prop::sample::select(validator_vec); + (arb_validator, arb_bond_amount()).prop_map(|(validator, amount)| { + Transition::Bond { + id: BondId { + source: validator.clone(), + validator, + }, + amount, + } + }) +} + +// Bond up to 10 tokens (in micro units) to avoid overflows +pub fn arb_bond_amount() -> impl Strategy { + (1_u64..10).prop_map(|val| token::Amount::from_uint(val, 0).unwrap()) +} + +/// Arbitrary validator misbehavior +fn arb_slash(state: &AbstractPosState) -> impl Strategy { + let validators = state.consensus_set.iter().fold( + Vec::new(), + |mut acc, (_epoch, vals)| { + for vals in vals.values() { + for validator in vals { + acc.push(validator.clone()); + } + } + acc + }, + ); + let current_epoch = state.epoch.0; + + let arb_validator = prop::sample::select(validators); + let slash_types = + vec![SlashType::LightClientAttack, SlashType::DuplicateVote]; + let arb_type = prop::sample::select(slash_types); + let arb_epoch = (current_epoch + .checked_sub(state.params.unbonding_len) + .unwrap_or_default()..=current_epoch) + .prop_map(Epoch::from); + (arb_validator, arb_type, arb_epoch).prop_map( + |(validator, slash_type, infraction_epoch)| Transition::Misbehavior { + address: validator, + slash_type, + infraction_epoch, + height: 0, + }, + ) +} diff --git a/proof_of_stake/src/tests/utils.rs b/proof_of_stake/src/tests/utils.rs new file mode 100644 index 0000000000..1e5f5acf62 --- /dev/null +++ b/proof_of_stake/src/tests/utils.rs @@ -0,0 +1,81 @@ +use std::marker::PhantomData; +use std::str::FromStr; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; +use std::{env, fmt}; + +// TODO: allow custom fmt fn 
+#[derive(Clone)] +pub struct DbgPrintDiff +where + T: fmt::Debug, +{ + last: String, + phantom_t: PhantomData, +} +impl DbgPrintDiff +where + T: fmt::Debug, +{ + pub fn new() -> Self { + Self { + last: Default::default(), + phantom_t: PhantomData, + } + } + + /// Store a state in dbg format string + pub fn store(&self, data: &T) -> Self { + Self { + last: Self::fmt_data(data), + phantom_t: PhantomData, + } + } + + /// Diff a state in dbg format string against the stored state + pub fn print_diff_and_store(&self, data: &T) -> Self { + let dbg_str = Self::fmt_data(data); + println!( + "{}", + pretty_assertions::StrComparison::new(&self.last, &dbg_str,) + ); + Self { + last: dbg_str, + phantom_t: PhantomData, + } + } + + fn fmt_data(data: &T) -> String { + format!("{:#?}", data) + } +} + +const ENV_VAR_TEST_PAUSES: &str = "TEST_PAUSES"; + +pub fn pause_for_enter() { + if paused_enabled() { + println!("Press Enter to continue"); + let mut input = String::new(); + std::io::stdin().read_line(&mut input).unwrap(); + } +} + +fn paused_enabled() -> bool { + // Cache the result of reading the environment variable + static ENABLED: AtomicUsize = AtomicUsize::new(0); + match ENABLED.load(Relaxed) { + 0 => {} + 1 => return false, + _ => return true, + } + let enabled: bool = matches!( + env::var(ENV_VAR_TEST_PAUSES).map(|val| { + FromStr::from_str(&val).unwrap_or_else(|_| { + panic!("Expected a bool for {ENV_VAR_TEST_PAUSES} env var.") + }) + }), + Ok(true), + ); + ENABLED.store(enabled as usize + 1, Relaxed); + enabled +} diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 736ffe7a46..8477b21cf2 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -3,7 +3,7 @@ mod rev_order; use core::fmt::Debug; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::convert::TryFrom; use std::fmt::Display; use std::hash::Hash; @@ -149,7 +149,7 @@ pub type CommissionRates = /// Epoched validator's bonds pub type 
Bonds = crate::epoched::EpochedDelta< - token::Change, + token::Amount, crate::epoched::OffsetPipelineLen, U64_MAX, >; @@ -176,6 +176,10 @@ pub type EpochedSlashes = crate::epoched::NestedEpoched< >; /// Epoched validator's unbonds +/// +/// The map keys from outside in are: +/// - start epoch of the bond in which it started contributing to stake +/// - withdrawable epoch of the unbond pub type Unbonds = NestedMap>; /// Consensus keys set, used to ensure uniqueness @@ -186,17 +190,104 @@ pub type ConsensusKeys = LazySet; /// (affects the deltas, pipeline after submission). The inner `Epoch` /// corresponds to the epoch from which the underlying bond became active /// (affected deltas). -pub type ValidatorUnbondRecords = +pub type ValidatorTotalUnbonded = NestedMap>; +/// A validator's incoming redelegations, where the key is the bond owner +/// address and the value is the redelegation end epoch +pub type IncomingRedelegations = LazyMap; + +/// A validator's outgoing redelegations, where the validator in question is a +/// source validator. +/// +/// The map keys from outside in are: +/// - destination validator's address +/// - bond start epoch +/// - redelegation epoch in which it started contributing to destination +/// validator +/// +/// The value is the redelegated bond amount. +pub type OutgoingRedelegations = + NestedMap>>; + +/// A validator's total redelegated bonded tokens for any delegator. +/// The map keys from outside in are: +/// +/// - redelegation epoch in which it started contributing to destination +/// validator +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type TotalRedelegatedBonded = NestedMap; + +/// A validator's total redelegated unbonded tokens for any delegator.
+/// The map keys from outside in are: +/// +/// - unbond epoch +/// - redelegation epoch in which it started contributing to destination +/// validator +/// - redelegation source validator +/// - bond start epoch +pub type TotalRedelegatedUnbonded = NestedMap; + +/// Map of redelegated tokens. +/// The map keys from outside in are: +/// +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type RedelegatedTokens = NestedMap>; + +/// Map of redelegated bonds or unbonds. +/// The map keys from outside in are: +/// +/// - for bonds redelegation epoch in which the redelegation started +/// contributing to destination validator, for unbonds it's withdrawal epoch +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +/// +/// TODO: it's a confusing that the outermost epoch is different for bonds vs +/// unbonds, can we swap withdrawal with redelegation epoch for +/// `DelegatorRedelegatedUnbonded`? +pub type RedelegatedBondsOrUnbonds = NestedMap; + +/// A delegator's redelegated bonded token amount. +/// The map keys from outside in are: +/// +/// - redelegation destination validator +/// - redelegation epoch in which the redelegation started contributing to +/// destination validator +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type DelegatorRedelegatedBonded = + NestedMap; + +/// A delegator's redelegated unbonded token amounts. +/// The map keys from outside in are: +/// +/// - redelegation destination validator +/// - redelegation epoch in which the redelegation started contributing to +/// destination validator +/// - withdrawal epoch of the unbond +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type DelegatorRedelegatedUnbonded = + NestedMap>; + +/// In-memory map of redelegated bonds. 
+/// The map keys from outside in are: + +/// - src validator address +/// - src bond start epoch where it started contributing to src validator +pub type EagerRedelegatedBondsMap = + BTreeMap>; + #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Eq, Hash, PartialEq, )] -/// TODO: slashed amount for thing +/// Slashed amount of tokens. pub struct SlashedAmount { - /// Perlangus + /// Amount of tokens that were slashed. pub amount: token::Amount, - /// Churms + /// Infraction epoch from which the tokens were slashed pub epoch: Epoch, } @@ -216,6 +307,20 @@ pub type RewardsProducts = LazyMap; /// rewards owed over the course of an epoch) pub type RewardsAccumulator = LazyMap; +/// Eager data for a generic redelegation +#[derive(Debug)] +pub struct Redelegation { + /// Start epoch of the redelegation is the first epoch in which the + /// redelegated amount no longer contributes to the stake of source + /// validator and starts contributing to destination validator. + pub redel_bond_start: Epoch, + /// Source validator + pub src_validator: Address, + /// Start epoch of the redelegated bond + pub bond_start: Epoch, + /// Redelegation amount + pub amount: token::Amount, +} // -------------------------------------------------------------------------------------------- /// A genesis validator definition.
diff --git a/shared/src/ledger/queries/vp/pos.rs b/shared/src/ledger/queries/vp/pos.rs index e78bff146b..f54a19c9c4 100644 --- a/shared/src/ledger/queries/vp/pos.rs +++ b/shared/src/ledger/queries/vp/pos.rs @@ -18,7 +18,8 @@ use namada_proof_of_stake::{ read_consensus_validator_set_addresses_with_stake, read_pos_params, read_total_stake, read_validator_max_commission_rate_change, read_validator_stake, unbond_handle, validator_commission_rate_handle, - validator_slashes_handle, validator_state_handle, + validator_incoming_redelegations_handle, validator_slashes_handle, + validator_state_handle, }; use crate::ledger::queries::types::RequestCtx; @@ -28,8 +29,6 @@ use crate::types::address::Address; use crate::types::storage::Epoch; use crate::types::token; -type AmountPair = (token::Amount, token::Amount); - // PoS validity predicate queries router! {POS, ( "validator" ) = { @@ -49,6 +48,9 @@ router! {POS, ( "state" / [validator: Address] / [epoch: opt Epoch] ) -> Option = validator_state, + + ( "incoming_redelegation" / [src_validator: Address] / [delegator: Address] ) + -> Option = validator_incoming_redelegation, }, ( "validator_set" ) = { @@ -79,7 +81,7 @@ router! {POS, -> token::Amount = bond, ( "bond_with_slashing" / [source: Address] / [validator: Address] / [epoch: opt Epoch] ) - -> AmountPair = bond_with_slashing, + -> token::Amount = bond_with_slashing, ( "unbond" / [source: Address] / [validator: Address] ) -> HashMap<(Epoch, Epoch), token::Amount> = unbond, @@ -262,7 +264,28 @@ where { let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); let params = read_pos_params(ctx.wl_storage)?; - read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch) + if namada_proof_of_stake::is_validator(ctx.wl_storage, &validator)? { + let stake = + read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch)?; + Ok(Some(stake)) + } else { + Ok(None) + } +} + +/// Get the incoming redelegation epoch for a source validator - delegator pair, +/// if there is any. 
+fn validator_incoming_redelegation( + ctx: RequestCtx<'_, D, H>, + src_validator: Address, + delegator: Address, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let handle = validator_incoming_redelegations_handle(&src_validator); + handle.get(ctx.wl_storage, &delegator) } /// Get all the validator in the consensus set with their bonded stake. @@ -312,7 +335,7 @@ fn bond_deltas( ctx: RequestCtx<'_, D, H>, source: Address, validator: Address, -) -> storage_api::Result> +) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -339,7 +362,6 @@ where let handle = bond_handle(&source, &validator); handle .get_sum(ctx.wl_storage, epoch, ¶ms)? - .map(token::Amount::from_change) .ok_or_err_msg("Cannot find bond") } @@ -348,7 +370,7 @@ fn bond_with_slashing( source: Address, validator: Address, epoch: Option, -) -> storage_api::Result +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, diff --git a/shared/src/ledger/queries/vp/token.rs b/shared/src/ledger/queries/vp/token.rs index 3b99cb0fda..498d5e19b8 100644 --- a/shared/src/ledger/queries/vp/token.rs +++ b/shared/src/ledger/queries/vp/token.rs @@ -53,7 +53,7 @@ pub mod client_only_methods { .await?; let balance = if response.data.is_empty() { - token::Amount::default() + token::Amount::zero() } else { token::Amount::try_from_slice(&response.data) .unwrap_or_default() diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index b765dece5a..15122bc7ee 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -316,6 +316,23 @@ pub struct Unbond { pub tx_code_path: PathBuf, } +/// Redelegation arguments +#[derive(Clone, Debug)] +pub struct Redelegate { + /// Common tx arguments + pub tx: Tx, + /// Source validator address + pub src_validator: C::Address, + /// Destination validator address + pub 
dest_validator: C::Address, + /// Owner of the bonds that are being redelegated + pub owner: C::Address, + /// The amount of tokens to redelegate + pub amount: token::Amount, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + /// Reveal public key #[derive(Clone, Debug)] pub struct RevealPk { diff --git a/shared/src/sdk/error.rs b/shared/src/sdk/error.rs index b103a9523f..af927d4814 100644 --- a/shared/src/sdk/error.rs +++ b/shared/src/sdk/error.rs @@ -165,6 +165,12 @@ pub enum TxError { /// Error retrieving from storage #[error("Error retrieving from storage")] Retrieval, + /// Bond amount is zero + #[error("The requested bond amount is 0.")] + BondIsZero, + /// Unbond amount is zero + #[error("The requested unbond amount is 0.")] + UnbondIsZero, /// No unbonded bonds ready to withdraw in the current epoch #[error( "There are no unbonded bonds ready to withdraw in the current epoch \ @@ -278,6 +284,28 @@ pub enum TxError { /// Invalid owner account #[error("The source account {0} is not valid or doesn't exist.")] InvalidAccount(String), + /// The redelegation amount is larger than the remaining bond amount + #[error( + "The redelegation amount is larger than the remaining bond amount. \ + Amount to redelegate is {0} and the remaining bond amount is {1}."
+ )] + RedelegationAmountTooLarge(String, String), + /// The redelegation amount is 0 + #[error("The amount requested to redelegate is 0 tokens")] + RedelegationIsZero, + /// The src and dest validators are the same + #[error("The source and destination validators are the same")] + RedelegationSrcEqDest, + /// The redelegation owner is a validator + #[error("The redelegation owner {0} is a validator")] + RedelegatorIsValidator(Address), + /// There is an incoming redelegation that is still subject to possible + /// slashing + #[error( + "An incoming redelegation from delegator {0} to validator {1} is \ + still subject to possible slashing" + )] + IncomingRedelIsStillSlashable(Address, Address), /// Other Errors that may show up when using the interface #[error("{0}")] Other(String), diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 58609bed42..670f78ba64 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -738,6 +738,23 @@ pub async fn query_commission_rate( ) } +/// Query and return the incoming redelegation epoch for a given pair of source +/// validator and delegator, if there is any. 
+pub async fn query_incoming_redelegations< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + src_validator: &Address, + delegator: &Address, +) -> Result, Error> { + convert_response::>( + RPC.vp() + .pos() + .validator_incoming_redelegation(client, src_validator, delegator) + .await, + ) +} + /// Query a validator's bonds for a given epoch pub async fn query_bond( client: &C, @@ -798,7 +815,7 @@ pub async fn query_and_print_unbonds< let unbonds = query_unbond_with_slashing(client, source, validator).await?; let current_epoch = query_epoch(client).await?; - let mut total_withdrawable = token::Amount::default(); + let mut total_withdrawable = token::Amount::zero(); let mut not_yet_withdrawable = HashMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { if withdraw_epoch <= current_epoch { @@ -809,7 +826,7 @@ pub async fn query_and_print_unbonds< *withdrawable_amount += amount; } } - if total_withdrawable != token::Amount::default() { + if !total_withdrawable.is_zero() { display_line!( IO, "Total withdrawable now: {}.", @@ -887,14 +904,14 @@ pub async fn get_bond_amount_at( delegator: &Address, validator: &Address, epoch: Epoch, -) -> Result, error::Error> { - let (_total, total_active) = convert_response::( +) -> Result { + let total_active = convert_response::( RPC.vp() .pos() .bond_with_slashing(client, delegator, validator, &Some(epoch)) .await, )?; - Ok(Some(total_active)) + Ok(total_active) } /// Get bonds and unbonds with all details (slashes and rewards, if any) diff --git a/shared/src/sdk/signing.rs b/shared/src/sdk/signing.rs index 042be03a63..ffcae08639 100644 --- a/shared/src/sdk/signing.rs +++ b/shared/src/sdk/signing.rs @@ -358,7 +358,7 @@ pub async fn wrap_tx< if !args.force { return Err(e); } else { - token::Amount::default() + token::Amount::zero() } } }; diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..9d2de44e4b 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ 
-809,6 +809,164 @@ pub async fn build_unjail_validator< .await } +/// Redelegate bonded tokens from one validator to another +pub async fn build_redelegation< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, + V: ShieldedUtils, + IO: Io, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, + args::Redelegate { + tx: tx_args, + src_validator, + dest_validator, + owner, + amount: redel_amount, + tx_code_path, + }: args::Redelegate, + fee_payer: common::PublicKey, +) -> Result { + // Require a positive amount of tokens to be redelegated + if redel_amount.is_zero() { + edisplay_line!( + IO, + "The requested redelegation amount is 0. A positive amount must \ + be requested." + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationIsZero)); + } + } + + // The src and dest validators must actually be validators + let src_validator = known_validator_or_err::<_, IO>( + src_validator.clone(), + tx_args.force, + client, + ) + .await?; + let dest_validator = known_validator_or_err::<_, IO>( + dest_validator.clone(), + tx_args.force, + client, + ) + .await?; + + // The delegator (owner) must exist on-chain and must not be a validator + let owner = + source_exists_or_err::<_, IO>(owner.clone(), tx_args.force, client) + .await?; + if rpc::is_validator(client, &owner).await? { + edisplay_line!( + IO, + "The given address {} is a validator. A validator is prohibited \ + from redelegating its own bonds.", + &owner + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegatorIsValidator( + owner.clone(), + ))); + } + } + + // Prohibit redelegation to the same validator + if src_validator == dest_validator { + edisplay_line!( + IO, + "The provided source and destination validators are the same. \ + Redelegation is not allowed to the same validator." 
+ ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationSrcEqDest)); + } + } + + // Prohibit chained redelegations + let params = rpc::get_pos_params(client).await?; + let incoming_redel_epoch = + rpc::query_incoming_redelegations(client, &src_validator, &owner) + .await?; + let current_epoch = rpc::query_epoch(client).await?; + let is_not_chained = if let Some(redel_end_epoch) = incoming_redel_epoch { + let last_contrib_epoch = redel_end_epoch.prev(); + last_contrib_epoch + params.slash_processing_epoch_offset() + <= current_epoch + } else { + true + }; + if !is_not_chained { + edisplay_line!( + IO, + "The source validator {} has an incoming redelegation from the \ + delegator {} that may still be subject to future slashing. \ + Redelegation is not allowed until this is no longer the case.", + &src_validator, + &owner + ); + if !tx_args.force { + return Err(Error::from(TxError::IncomingRedelIsStillSlashable( + src_validator.clone(), + owner.clone(), + ))); + } + } + + // There must be at least as many tokens in the bond as the requested + // redelegation amount + let bond_amount = + rpc::query_bond(client, &owner, &src_validator, None).await?; + if redel_amount > bond_amount { + edisplay_line!( + IO, + "There are not enough tokens available for the desired \ + redelegation at the current epoch {}. Requested to redelegate {} \ + tokens but only {} tokens are available.", + current_epoch, + redel_amount.to_string_native(), + bond_amount.to_string_native() + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationAmountTooLarge( + redel_amount.to_string_native(), + bond_amount.to_string_native(), + ))); + } + } else { + display_line!( + IO, + "{} NAM tokens available for redelegation. 
Submitting \ + redelegation transaction for {} tokens...", + bond_amount.to_string_native(), + redel_amount.to_string_native() + ); + } + + let data = pos::Redelegation { + src_validator, + dest_validator, + owner, + amount: redel_amount, + }; + + let (tx, _epoch) = build::<_, _, _, _, _, IO>( + client, + wallet, + shielded, + &tx_args, + tx_code_path, + data, + do_nothing, + &fee_payer, + None, + ) + .await?; + Ok(tx) +} + /// Submit transaction to withdraw an unbond pub async fn build_withdraw< C: crate::sdk::queries::Client + Sync, @@ -904,43 +1062,60 @@ pub async fn build_unbond< }: args::Unbond, fee_payer: common::PublicKey, ) -> Result<(Tx, Option, Option<(Epoch, token::Amount)>)> { - let source = source.clone(); - // Check the source's current bond amount - let bond_source = source.clone().unwrap_or_else(|| validator.clone()); + // Require a positive amount of tokens to be unbonded + if amount.is_zero() { + edisplay_line!( + IO, + "The requested unbond amount is 0. A positive amount must be \ + requested."
+ ); + if !tx_args.force { + return Err(Error::from(TxError::UnbondIsZero)); + } + } - if !tx_args.force { - known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, - ) - .await?; + // The validator must actually be a validator + let validator = known_validator_or_err::<_, IO>( + validator.clone(), + tx_args.force, + client, + ) + .await?; - let bond_amount = - rpc::query_bond(client, &bond_source, &validator, None).await?; - display_line!( + // Check that the source address exists on chain + let source = match source.clone() { + Some(source) => { + source_exists_or_err::<_, IO>(source, tx_args.force, client) + .await + .map(Some) + } + None => Ok(source.clone()), + }?; + let bond_source = source.clone().unwrap_or(validator.clone()); + + // Check the source's current bond amount + let bond_amount = + rpc::query_bond(client, &bond_source, &validator, None).await?; + display_line!( + IO, + "Bond amount available for unbonding: {} NAM", + bond_amount.to_string_native() + ); + if amount > bond_amount { + edisplay_line!( IO, - "Bond amount available for unbonding: {} NAM", + "The total bonds of the source {} is lower than the amount to be \ + unbonded. Amount to unbond is {} and the total bonds is {}.", + bond_source, + amount.to_string_native(), bond_amount.to_string_native() ); - - if amount > bond_amount { - edisplay_line!( - IO, - "The total bonds of the source {} is lower than the amount to \ - be unbonded. 
Amount to unbond is {} and the total bonds is \ - {}.", + if !tx_args.force { + return Err(Error::from(TxError::LowerBondThanUnbond( bond_source, amount.to_string_native(), - bond_amount.to_string_native() - ); - if !tx_args.force { - return Err(Error::from(TxError::LowerBondThanUnbond( - bond_source, - amount.to_string_native(), - bond_amount.to_string_native(), - ))); - } + bond_amount.to_string_native(), + ))); } } @@ -958,7 +1133,7 @@ pub async fn build_unbond< let data = pos::Unbond { validator: validator.clone(), amount, - source: source.clone(), + source, }; let (tx, epoch) = build::<_, _, _, _, _, IO>( @@ -1064,6 +1239,19 @@ pub async fn build_bond< }: args::Bond, fee_payer: common::PublicKey, ) -> Result<(Tx, Option)> { + // Require a positive amount of tokens to be bonded + if amount.is_zero() { + edisplay_line!( + IO, + "The requested bond amount is 0. A positive amount must be \ + requested." + ); + if !tx_args.force { + return Err(Error::from(TxError::BondIsZero)); + } + } + + // The validator must actually be a validator let validator = known_validator_or_err::<_, IO>( validator.clone(), tx_args.force, @@ -1707,7 +1895,7 @@ pub async fn build_transfer< // This has no side-effect because transaction is to self. let (_amount, token) = if source == masp_addr && target == masp_addr { // TODO Refactor me, we shouldn't rely on any specific token here. 
- (token::Amount::default(), args.native_token.clone()) + (token::Amount::zero(), args.native_token.clone()) } else { (validated_amount.amount, token) }; @@ -2157,7 +2345,7 @@ async fn check_balance_too_low_err< ) .await, ); - Ok(token::Amount::default()) + Ok(token::Amount::zero()) } else { Err(Error::from(TxError::BalanceTooLow( source.clone(), @@ -2178,7 +2366,7 @@ async fn check_balance_too_low_err< source, token ); - Ok(token::Amount::default()) + Ok(token::Amount::zero()) } else { Err(Error::from(TxError::NoBalanceForToken( source.clone(), diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 2f5bbe4ea7..98bf27bc6c 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -1051,13 +1051,16 @@ fn invalid_transactions() -> Result<()> { /// PoS bonding, unbonding and withdrawal tests. In this test we: /// /// 1. Run the ledger node with shorter epochs for faster progression -/// 2. Submit a self-bond for the genesis validator -/// 3. Submit a delegation to the genesis validator -/// 4. Submit an unbond of the self-bond -/// 5. Submit an unbond of the delegation -/// 6. Wait for the unbonding epoch -/// 7. Submit a withdrawal of the self-bond -/// 8. Submit a withdrawal of the delegation +/// 2. Submit a self-bond for the first genesis validator +/// 3. Submit a delegation to the first genesis validator +/// 4. Submit a re-delegation from the first to the second genesis validator +/// 5. Submit an unbond of the self-bond +/// 6. Submit an unbond of the delegation from the first validator +/// 7. Submit an unbond of the re-delegation from the second validator +/// 8. Wait for the unbonding epoch +/// 9. Submit a withdrawal of the self-bond +/// 10. Submit a withdrawal of the delegation +/// 11. 
Submit an withdrawal of the re-delegation #[test] fn pos_bonds() -> Result<()> { let pipeline_len = 2; @@ -1075,11 +1078,17 @@ fn pos_bonds() -> Result<()> { unbonding_len, ..genesis.pos_params }; - GenesisConfig { + let genesis = GenesisConfig { parameters, pos_params, ..genesis - } + }; + let mut genesis = + setup::set_validators(2, genesis, default_port_offset); + // Remove stake from the 2nd validator so chain can run with a + // single node + genesis.validator.get_mut("validator-1").unwrap().tokens = None; + genesis }, None, )?; @@ -1093,13 +1102,13 @@ fn pos_bonds() -> Result<()> { ); // 1. Run the ledger node - let _bg_ledger = + let _bg_validator_0 = start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); - // 2. Submit a self-bond for the genesis validator + // 2. Submit a self-bond for the first genesis validator let tx_args = vec![ "bond", "--validator", @@ -1109,7 +1118,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1117,7 +1126,7 @@ fn pos_bonds() -> Result<()> { client.exp_string("Transaction is valid.")?; client.assert_success(); - // 3. Submit a delegation to the genesis validator + // 3. Submit a delegation to the first genesis validator let tx_args = vec![ "bond", "--validator", @@ -1129,14 +1138,35 @@ fn pos_bonds() -> Result<()> { "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); - // 4. Submit an unbond of the self-bond + // 4. 
Submit a re-delegation from the first to the second genesis validator + let tx_args = vec![ + "redelegate", + "--source-validator", + "validator-0", + "--destination-validator", + "validator-1", + "--owner", + BERTHA, + "--amount", + "2500.0", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 5. Submit an unbond of the self-bond let tx_args = vec![ "unbond", "--validator", @@ -1146,7 +1176,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1154,7 +1184,7 @@ fn pos_bonds() -> Result<()> { .exp_string("Amount 5100.000000 withdrawable starting from epoch ")?; client.assert_success(); - // 5. Submit an unbond of the delegation + // 6. Submit an unbond of the delegation from the first validator let tx_args = vec![ "unbond", "--validator", @@ -1162,22 +1192,41 @@ fn pos_bonds() -> Result<()> { "--source", BERTHA, "--amount", - "3200.", + "1600.", "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - let expected = "Amount 3200.000000 withdrawable starting from epoch "; + let expected = "Amount 1600.000000 withdrawable starting from epoch "; + let _ = client.exp_regex(&format!("{expected}.*\n"))?; + client.assert_success(); + + // 7. 
Submit an unbond of the re-delegation from the second validator + let tx_args = vec![ + "unbond", + "--validator", + "validator-1", + "--source", + BERTHA, + "--amount", + "1600.", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + let expected = "Amount 1600.000000 withdrawable starting from epoch "; let (_unread, matched) = client.exp_regex(&format!("{expected}.*\n"))?; let epoch_raw = matched.trim().split_once(expected).unwrap().1; let delegation_withdrawable_epoch = Epoch::from_str(epoch_raw).unwrap(); client.assert_success(); - // 6. Wait for the delegation withdrawable epoch (the self-bond was unbonded + // 8. Wait for the delegation withdrawable epoch (the self-bond was unbonded // before it) - let epoch = get_epoch(&test, &validator_one_rpc)?; + let epoch = get_epoch(&test, &validator_0_rpc)?; println!( "Current epoch: {}, earliest epoch for withdrawal: {}", @@ -1192,13 +1241,13 @@ fn pos_bonds() -> Result<()> { delegation_withdrawable_epoch ); } - let epoch = epoch_sleep(&test, &validator_one_rpc, 40)?; + let epoch = epoch_sleep(&test, &validator_0_rpc, 40)?; if epoch >= delegation_withdrawable_epoch { break; } } - // 7. Submit a withdrawal of the self-bond + // 9. Submit a withdrawal of the self-bond let tx_args = vec![ "withdraw", "--validator", @@ -1206,7 +1255,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1214,7 +1263,7 @@ fn pos_bonds() -> Result<()> { client.exp_string("Transaction is valid.")?; client.assert_success(); - // 8. Submit a withdrawal of the delegation + // 10. 
Submit a withdrawal of the delegation let tx_args = vec![ "withdraw", "--validator", @@ -1224,12 +1273,30 @@ fn pos_bonds() -> Result<()> { "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 11. Submit an withdrawal of the re-delegation + let tx_args = vec![ + "withdraw", + "--validator", + "validator-1", + "--source", + BERTHA, + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); + Ok(()) } diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index 344a75d4e3..3715804b66 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -97,7 +97,7 @@ use namada::ledger::pos::namada_proof_of_stake::init_genesis; use namada::proof_of_stake::parameters::PosParams; -use namada::proof_of_stake::storage::GenesisValidator; +use namada::proof_of_stake::types::GenesisValidator; use namada::types::storage::Epoch; use crate::tx::tx_host_env; @@ -572,8 +572,7 @@ pub mod testing { use namada::proof_of_stake::epoched::DynEpochOffset; use namada::proof_of_stake::parameters::testing::arb_rate; use namada::proof_of_stake::parameters::PosParams; - use namada::proof_of_stake::storage::BondId; - use namada::proof_of_stake::types::ValidatorState; + use namada::proof_of_stake::types::{BondId, ValidatorState}; use namada::proof_of_stake::{ get_num_consensus_validators, read_pos_params, unbond_handle, ADDRESS as POS_ADDRESS, @@ -1033,7 +1032,7 @@ pub mod testing { // .sum() // }) // .unwrap_or_default(); - let token_delta = token::Change::default(); + let token_delta = token::Change::zero(); vec![ 
PosStorageChange::WithdrawUnbond { owner, validator }, @@ -1150,7 +1149,7 @@ pub mod testing { // last // update, until we unbond the full // amount let mut bond_epoch = // u64::from(bonds.last_update()) + params.unbonding_len; - // 'outer: while to_unbond != token::Amount::default() + // 'outer: while to_unbond != token::Amount::zero() // && bond_epoch >= bonds.last_update().into() // { // if let Some(bond) = bonds.get_delta_at_epoch(bond_epoch) diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index cc8bcb7b63..36584a4d19 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -7,15 +7,15 @@ use namada_core::types::{key, token}; pub use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::{ become_validator, bond_tokens, change_validator_commission_rate, - read_pos_params, unbond_tokens, unjail_validator, withdraw_tokens, - BecomeValidator, + read_pos_params, redelegate_tokens, unbond_tokens, unjail_validator, + withdraw_tokens, BecomeValidator, }; -pub use namada_proof_of_stake::{parameters, types}; +pub use namada_proof_of_stake::{parameters, types, ResultSlashing}; use super::*; impl Ctx { - /// NEW: Self-bond tokens to a validator when `source` is `None` or equal to + /// Self-bond tokens to a validator when `source` is `None` or equal to /// the `validator` address, or delegate tokens from the `source` to the /// `validator`. pub fn bond_tokens( @@ -28,7 +28,7 @@ impl Ctx { bond_tokens(self, source, validator, amount, current_epoch) } - /// NEW: Unbond self-bonded tokens from a validator when `source` is `None` + /// Unbond self-bonded tokens from a validator when `source` is `None` /// or equal to the `validator` address, or unbond delegated tokens from /// the `source` to the `validator`. 
pub fn unbond_tokens( @@ -36,12 +36,12 @@ impl Ctx { source: Option<&Address>, validator: &Address, amount: token::Amount, - ) -> TxResult { + ) -> EnvResult { let current_epoch = self.get_block_epoch()?; - unbond_tokens(self, source, validator, amount, current_epoch) + unbond_tokens(self, source, validator, amount, current_epoch, false) } - /// NEW: Withdraw unbonded tokens from a self-bond to a validator when + /// Withdraw unbonded tokens from a self-bond to a validator when /// `source` is `None` or equal to the `validator` address, or withdraw /// unbonded tokens delegated to the `validator` to the `source`. pub fn withdraw_tokens( @@ -53,7 +53,7 @@ impl Ctx { withdraw_tokens(self, source, validator, current_epoch) } - /// NEW: Change validator commission rate. + /// Change validator commission rate. pub fn change_validator_commission_rate( &mut self, validator: &Address, @@ -69,7 +69,26 @@ impl Ctx { unjail_validator(self, validator, current_epoch) } - /// NEW: Attempt to initialize a validator account. On success, returns the + /// Redelegate bonded tokens from one validator to another one. + pub fn redelegate_tokens( + &mut self, + owner: &Address, + src_validator: &Address, + dest_validator: &Address, + amount: token::Amount, + ) -> TxResult { + let current_epoch = self.get_block_epoch()?; + redelegate_tokens( + self, + owner, + src_validator, + dest_validator, + current_epoch, + amount, + ) + } + + /// Attempt to initialize a validator account. On success, returns the /// initialized validator account's address. 
pub fn init_validator( &mut self, diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 81f07ba049..dfc46004db 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -20,6 +20,7 @@ tx_ibc = ["namada_tx_prelude"] tx_init_account = ["namada_tx_prelude"] tx_init_proposal = ["namada_tx_prelude"] tx_init_validator = ["namada_tx_prelude"] +tx_redelegate = ["namada_tx_prelude"] tx_reveal_pk = ["namada_tx_prelude"] tx_transfer = ["namada_tx_prelude"] tx_unbond = ["namada_tx_prelude"] diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile index 7b00424baf..e78237c89d 100644 --- a/wasm/wasm_source/Makefile +++ b/wasm/wasm_source/Makefile @@ -12,6 +12,7 @@ wasms += tx_ibc wasms += tx_init_account wasms += tx_init_proposal wasms += tx_init_validator +wasms += tx_redelegate wasms += tx_reveal_pk wasms += tx_transfer wasms += tx_unbond diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs index d376f8ca70..139835fe9f 100644 --- a/wasm/wasm_source/src/lib.rs +++ b/wasm/wasm_source/src/lib.rs @@ -12,6 +12,8 @@ pub mod tx_init_account; pub mod tx_init_proposal; #[cfg(feature = "tx_init_validator")] pub mod tx_init_validator; +#[cfg(feature = "tx_redelegate")] +pub mod tx_redelegate; #[cfg(feature = "tx_resign_steward")] pub mod tx_resign_steward; #[cfg(feature = "tx_reveal_pk")] diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 3453747161..b792fdd990 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -17,7 +17,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{PosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, @@ -37,7 
+38,6 @@ mod tests { use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::token; - use namada_vp_prelude::proof_of_stake::WeightedValidator; use proptest::prelude::*; use super::*; @@ -68,7 +68,7 @@ mod tests { ) -> TxResult { // Remove the validator stake threshold for simplicity let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + validator_stake_threshold: token::Amount::zero(), ..pos_params }; @@ -138,15 +138,12 @@ mod tests { &pos_params, Epoch(epoch), )?); - epoched_validator_stake_pre.push( - read_validator_stake( - ctx(), - &pos_params, - &bond.validator, - Epoch(epoch), - )? - .unwrap(), - ); + epoched_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &bond.validator, + Epoch(epoch), + )?); epoched_validator_set_pre.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -171,15 +168,12 @@ mod tests { &pos_params, Epoch(epoch), )?); - epoched_validator_stake_post.push( - read_validator_stake( - ctx(), - &pos_params, - &bond.validator, - Epoch(epoch), - )? - .unwrap(), - ); + epoched_validator_stake_post.push(read_validator_stake( + ctx(), + &pos_params, + &bond.validator, + Epoch(epoch), + )?); epoched_validator_set_post.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -269,13 +263,6 @@ mod tests { let bonds_post = bond_handle(&bond_src, &bond.validator); // let bonds_post = ctx().read_bond(&bond_id)?.unwrap(); - for epoch in 0..pos_params.unbonding_len { - dbg!( - epoch, - bonds_post.get_delta_val(ctx(), Epoch(epoch), &pos_params)? 
- ); - } - if is_delegation { // A delegation is applied at pipeline offset // Check that bond is empty before pipeline offset @@ -290,7 +277,7 @@ mod tests { } // Check that bond is updated after the pipeline length for epoch in pos_params.pipeline_len..=pos_params.unbonding_len { - let expected_bond_amount = bond.amount.change(); + let expected_bond_amount = bond.amount; let bond = bonds_post.get_sum(ctx(), Epoch(epoch), &pos_params)?; assert_eq!( @@ -305,7 +292,7 @@ mod tests { // Check that a bond already exists from genesis with initial stake // for the validator for epoch in 0..pos_params.pipeline_len { - let expected_bond_amount = initial_stake.change(); + let expected_bond_amount = initial_stake; let bond = bonds_post .get_sum(ctx(), Epoch(epoch), &pos_params) .expect("Genesis validator should already have self-bond"); @@ -323,7 +310,7 @@ mod tests { bonds_post.get_sum(ctx(), Epoch(epoch), &pos_params)?; assert_eq!( bond, - Some(expected_bond_amount.change()), + Some(expected_bond_amount), "Self-bond at and after pipeline offset should contain \ genesis stake and the bonded amount - checking epoch \ {epoch}" diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index c1e1b35226..29f8c58364 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -20,6 +20,7 @@ mod tests { use std::cmp; use namada::ledger::pos::{PosParams, PosVP}; + use namada::proof_of_stake::types::GenesisValidator; use namada::proof_of_stake::validator_commission_rate_handle; use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::types::storage::Epoch; @@ -33,7 +34,6 @@ mod tests { use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::token; - use namada_vp_prelude::proof_of_stake::GenesisValidator; use proptest::prelude::*; use super::*; diff --git 
a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/wasm_source/src/tx_redelegate.rs new file mode 100644 index 0000000000..12d6bd8549 --- /dev/null +++ b/wasm/wasm_source/src/tx_redelegate.rs @@ -0,0 +1,409 @@ +//! A tx for a delegator (non-validator bond owner) to redelegate bonded tokens +//! from one validator to another. + +use namada_tx_prelude::*; + +#[transaction(gas = 460000)] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = signed.data().ok_or_err_msg("Missing data")?; + let transaction::pos::Redelegation { + src_validator, + dest_validator, + owner, + amount, + } = transaction::pos::Redelegation::try_from_slice(&data[..]) + .wrap_err("failed to decode a Redelegation")?; + ctx.redelegate_tokens(&owner, &src_validator, &dest_validator, amount) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use namada::ledger::pos::{PosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; + use namada::proof_of_stake::{ + bond_handle, read_consensus_validator_set_addresses_with_stake, + read_total_stake, read_validator_stake, unbond_handle, + }; + use namada::types::dec::Dec; + use namada::types::storage::Epoch; + use namada_tests::log::test; + use namada_tests::native_vp::pos::init_pos; + use namada_tests::native_vp::TestNativeVpEnv; + use namada_tests::tx::*; + use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::chain::ChainId; + use namada_tx_prelude::key::testing::arb_common_keypair; + use namada_tx_prelude::key::RefTo; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_tx_prelude::token; + use proptest::prelude::*; + + use super::*; + + proptest! 
{ + /// In this test we setup the ledger and PoS system with an arbitrary + /// initial state with 1 genesis validator, a delegation bond if the + /// unbond is for a delegation, arbitrary PoS parameters, and + /// we generate an arbitrary unbond that we'd like to apply. + /// + /// After we apply the unbond, we check that all the storage values + /// in PoS system have been updated as expected and then we also check + /// that this transaction is accepted by the PoS validity predicate. + #[test] + fn test_tx_redelegate( + (initial_stake, redelegation) in arb_initial_stake_and_redelegation(), + // A key to sign the transaction + key in arb_common_keypair(), + pos_params in arb_pos_params(None)) { + test_tx_redelegate_aux(initial_stake, redelegation, key, pos_params).unwrap() + } + } + + // TODO: more assertions needed!! + fn test_tx_redelegate_aux( + initial_stake: token::Amount, + redelegation: transaction::pos::Redelegation, + key: key::common::SecretKey, + pos_params: PosParams, + ) -> TxResult { + // Remove the validator stake threshold for simplicity + let pos_params = PosParams { + validator_stake_threshold: token::Amount::zero(), + ..pos_params + }; + dbg!(&initial_stake, &redelegation); + + let consensus_key_1 = key::testing::keypair_1().ref_to(); + let consensus_key_2 = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); + let commission_rate = Dec::new(5, 2).expect("Cannot fail"); + let max_commission_rate_change = Dec::new(1, 2).expect("Cannot fail"); + + let genesis_validators = [ + GenesisValidator { + address: redelegation.src_validator.clone(), + tokens: token::Amount::zero(), + consensus_key: consensus_key_1, + eth_cold_key: eth_cold_key.clone(), + eth_hot_key: eth_hot_key.clone(), + commission_rate, + max_commission_rate_change, + }, + GenesisValidator { + address: redelegation.dest_validator.clone(), + tokens: token::Amount::zero(), + consensus_key: 
consensus_key_2, + eth_cold_key, + eth_hot_key, + commission_rate, + max_commission_rate_change, + }, + ]; + + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + + let native_token = tx_host_env::with(|tx_env| { + let native_token = tx_env.wl_storage.storage.native_token.clone(); + let owner = &redelegation.owner; + tx_env.spawn_accounts([owner]); + + // First, credit the delegator with the initial stake, + // before we initialize the bond below + tx_env.credit_tokens(owner, &native_token, initial_stake); + native_token + }); + + // Create the initial bond. + ctx().bond_tokens( + Some(&redelegation.owner), + &redelegation.src_validator, + initial_stake, + )?; + tx_host_env::commit_tx_and_block(); + + let tx_code = vec![]; + let tx_data = redelegation.try_to_vec().unwrap(); + let mut tx = Tx::new(ChainId::default(), None); + tx.add_code(tx_code) + .add_serialized_data(tx_data) + .sign_wrapper(key); + let signed_tx = tx; + + // Check that PoS balance is the same as the initial validator stake + let pos_balance_key = token::balance_key( + &native_token, + &Address::Internal(InternalAddress::PoS), + ); + let pos_balance_pre: token::Amount = ctx() + .read(&pos_balance_key)? 
+ .expect("PoS must have balance"); + assert_eq!(pos_balance_pre, initial_stake); + + let mut epoched_total_stake_pre: Vec = Vec::new(); + let mut epoched_src_validator_stake_pre: Vec = + Vec::new(); + let mut epoched_dest_validator_stake_pre: Vec = + Vec::new(); + let mut epoched_src_bonds_pre: Vec> = Vec::new(); + let mut epoched_dest_bonds_pre: Vec> = Vec::new(); + let mut epoched_validator_set_pre: Vec> = + Vec::new(); + + for epoch in 0..=pos_params.withdrawable_epoch_offset() { + epoched_total_stake_pre.push(read_total_stake( + ctx(), + &pos_params, + Epoch(epoch), + )?); + epoched_src_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch), + )?); + epoched_dest_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch), + )?); + epoched_src_bonds_pre.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + epoched_dest_bonds_pre.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + epoched_validator_set_pre.push( + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(epoch), + )?, + ); + } + + // Apply the redelegation tx + apply_tx(ctx(), signed_tx)?; + + // Read the data after the redelegation tx is executed. 
+ // The following storage keys should be updated: + // - `#{PoS}/validator/#{validator}/deltas` + // - `#{PoS}/total_deltas` + // - `#{PoS}/validator_set` + + let mut epoched_src_bonds_post: Vec> = Vec::new(); + let mut epoched_dest_bonds_post: Vec> = + Vec::new(); + for epoch in 0..=pos_params.unbonding_len { + epoched_src_bonds_post.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + epoched_dest_bonds_post.push( + bond_handle(&redelegation.owner, &redelegation.dest_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + } + + // Before pipeline offset, there can only be self-bond for genesis + // validator. In case of a delegation the state is setup so that there + // is no bond until pipeline offset. + for epoch in 0..pos_params.pipeline_len { + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch) + )?, + token::Amount::zero(), + "The validator stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch) + )?, + token::Amount::zero(), + "The validator stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + read_total_stake(ctx(), &pos_params, Epoch(epoch))?, + token::Amount::zero(), + "The total stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + epoched_validator_set_pre[epoch as usize], + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(epoch), + )?, + "Validator set before pipeline offset must not change - \ + checking epoch {epoch}" + ); + } + + // Check stakes after the pipeline length + for epoch in + pos_params.pipeline_len..=pos_params.withdrawable_epoch_offset() + { + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch) + )?, + initial_stake - 
redelegation.amount, + "The validator stake at and after the pipeline offset must \ + have changed - checking in epoch: {epoch}" + ); + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch) + )?, + redelegation.amount, + "The validator stake at and after the pipeline offset must \ + have changed - checking in epoch: {epoch}" + ); + assert_eq!( + read_total_stake(ctx(), &pos_params, Epoch(epoch))?, + initial_stake, + "The total stake at and after the pipeline offset must have \ + changed - checking in epoch: {epoch}" + ); + } + // Check validator sets + assert_eq!( + BTreeSet::from_iter([ + WeightedValidator { + bonded_stake: initial_stake - redelegation.amount, + address: redelegation.src_validator.clone() + }, + WeightedValidator { + bonded_stake: redelegation.amount, + address: redelegation.dest_validator.clone() + } + ]), + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(pos_params.pipeline_len), + )?, + "The validator set at pipeline offset should have changed" + ); + + // Check that PoS account balance is unchanged by the redelegation + let pos_balance_post: token::Amount = + ctx().read(&pos_balance_key)?.unwrap(); + assert_eq!( + pos_balance_pre, pos_balance_post, + "Unbonding should not affect PoS system balance" + ); + + // Check that no unbonds exist + assert!( + unbond_handle(&redelegation.owner, &redelegation.src_validator) + .is_empty(ctx())? + ); + assert!( + unbond_handle(&redelegation.owner, &redelegation.dest_validator) + .is_empty(ctx())? 
+ ); + + // Check bonds + for epoch in 0..pos_params.withdrawable_epoch_offset() { + let (exp_src_bond, exp_dest_bond) = + if epoch == pos_params.pipeline_len { + ( + Some(initial_stake - redelegation.amount), + Some(redelegation.amount), + ) + } else { + (None, None) + }; + + assert_eq!( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + exp_src_bond, + "After the tx is applied, the bond should be changed in \ + place, checking epoch {epoch}" + ); + assert_eq!( + bond_handle(&redelegation.owner, &redelegation.dest_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + exp_dest_bond, + "After the tx is applied, the bond should be changed in \ + place, checking epoch {epoch}" + ); + } + + // Use the tx_env to run PoS VP + let tx_env = tx_host_env::take(); + let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); + let result = vp_env.validate_tx(PosVP::new); + let result = + result.expect("Validation of valid changes must not fail!"); + assert!( + result, + "PoS Validity predicate must accept this transaction" + ); + Ok(()) + } + + /// Generates an initial validator stake and a redelegation, while making + /// sure that the `initial_stake >= redelegation.amount`. + fn arb_initial_stake_and_redelegation() + -> impl Strategy + { + // Generate initial stake + token::testing::arb_amount_ceiled((i64::MAX / 8) as u64).prop_flat_map( + |initial_stake| { + // Use the initial stake to limit the bond amount + let redelegation = arb_redelegation( + u128::try_from(initial_stake).unwrap() as u64, + ); + // Use the generated initial stake too too + (Just(initial_stake), redelegation) + }, + ) + } + + /// Generates an arbitrary redelegation, with the amount constrained from + /// above. 
+ fn arb_redelegation( + max_amount: u64, + ) -> impl Strategy { + ( + address::testing::arb_established_address(), + address::testing::arb_established_address(), + address::testing::arb_non_internal_address(), + token::testing::arb_amount_non_zero_ceiled(max_amount), + ) + .prop_map( + |(src_validator, dest_validator, owner, amount)| { + let src_validator = Address::Established(src_validator); + let dest_validator = Address::Established(dest_validator); + transaction::pos::Redelegation { + src_validator, + dest_validator, + owner, + amount, + } + }, + ) + } +} diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 7e08c0dcda..32ad502761 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -10,15 +10,22 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let unbond = transaction::pos::Unbond::try_from_slice(&data[..]) .wrap_err("failed to decode Unbond")?; - ctx.unbond_tokens(unbond.source.as_ref(), &unbond.validator, unbond.amount) + ctx.unbond_tokens( + unbond.source.as_ref(), + &unbond.validator, + unbond.amount, + )?; + // TODO: would using debug_log! be useful? 
+ + Ok(()) } #[cfg(test)] mod tests { use std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; - use namada::proof_of_stake::types::WeightedValidator; + use namada::ledger::pos::{PosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, @@ -66,7 +73,7 @@ mod tests { ) -> TxResult { // Remove the validator stake threshold for simplicity let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + validator_stake_threshold: token::Amount::zero(), ..pos_params }; @@ -85,7 +92,7 @@ mod tests { tokens: if is_delegation { // If we're unbonding a delegation, we'll give the initial stake // to the delegation instead of the validator - token::Amount::default() + token::Amount::zero() } else { initial_stake }, @@ -113,8 +120,9 @@ mod tests { native_token }); - // Initialize the delegation if it is the case - unlike genesis - // validator's self-bond, this happens at pipeline offset + // If delegation, initialize the bond with a delegation from the unbond + // source, which will become active at pipeline offset. If a self-bond, + // the bond is already active from genesis. 
if is_delegation { ctx().bond_tokens( unbond.source.as_ref(), @@ -136,11 +144,8 @@ mod tests { .source .clone() .unwrap_or_else(|| unbond.validator.clone()); - // let unbond_id = BondId { - // validator: unbond.validator.clone(), - // source: unbond_src.clone(), - // }; + // Check that PoS balance is the same as the initial validator stake let pos_balance_key = token::balance_key( &native_token, &Address::Internal(InternalAddress::PoS), @@ -158,26 +163,20 @@ mod tests { let mut epoched_validator_set_pre: Vec> = Vec::new(); - for epoch in 0..=pos_params.unbonding_len { + for epoch in 0..=pos_params.withdrawable_epoch_offset() { epoched_total_stake_pre.push(read_total_stake( ctx(), &pos_params, Epoch(epoch), )?); - epoched_validator_stake_pre.push( - read_validator_stake( - ctx(), - &pos_params, - &unbond.validator, - Epoch(epoch), - )? - .unwrap(), - ); - epoched_bonds_pre.push( - bond_handle - .get_delta_val(ctx(), Epoch(epoch), &pos_params)? - .map(token::Amount::from_change), - ); + epoched_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &unbond.validator, + Epoch(epoch), + )?); + epoched_bonds_pre + .push(bond_handle.get_delta_val(ctx(), Epoch(epoch))?); epoched_validator_set_pre.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -185,31 +184,25 @@ mod tests { )?, ); } - // dbg!(&epoched_bonds_pre); // Apply the unbond tx apply_tx(ctx(), signed_tx)?; - // Read the data after the tx is executed. + // Read the data after the unbond tx is executed. // The following storage keys should be updated: - // - `#{PoS}/validator/#{validator}/deltas` // - `#{PoS}/total_deltas` // - `#{PoS}/validator_set` - let mut epoched_bonds_post: Vec> = Vec::new(); + let mut epoched_bonds_post: Vec> = Vec::new(); for epoch in 0..=pos_params.unbonding_len { - epoched_bonds_post.push( - bond_handle - .get_delta_val(ctx(), Epoch(epoch), &pos_params)? 
- .map(token::Amount::from_change), - ); + epoched_bonds_post + .push(bond_handle.get_delta_val(ctx(), Epoch(epoch))?); } - // dbg!(&epoched_bonds_post); let expected_amount_before_pipeline = if is_delegation { // When this is a delegation, there will be no bond until pipeline - token::Amount::default() + token::Amount::zero() } else { // Before pipeline offset, there can only be self-bond initial_stake @@ -226,7 +219,7 @@ mod tests { &unbond.validator, Epoch(epoch) )?, - Some(expected_amount_before_pipeline), + expected_amount_before_pipeline, "The validator deltas before the pipeline offset must not \ change - checking in epoch: {epoch}" ); @@ -249,7 +242,9 @@ mod tests { // At and after pipeline offset, there can be either delegation or // self-bond, both of which are initialized to the same `initial_stake` - for epoch in pos_params.pipeline_len..pos_params.unbonding_len { + for epoch in + pos_params.pipeline_len..=pos_params.withdrawable_epoch_offset() + { assert_eq!( read_validator_stake( ctx(), @@ -257,16 +252,17 @@ mod tests { &unbond.validator, Epoch(epoch) )?, - Some(initial_stake - unbond.amount), - "The validator deltas at and after the pipeline offset must \ + initial_stake - unbond.amount, + "The validator stake at and after the pipeline offset must \ have changed - checking in epoch: {epoch}" ); assert_eq!( read_total_stake(ctx(), &pos_params, Epoch(epoch))?, (initial_stake - unbond.amount), - "The total deltas at and after the pipeline offset must have \ + "The total stake at and after the pipeline offset must have \ changed - checking in epoch: {epoch}" ); + // Only at pipeline because the read won't return anything after if epoch == pos_params.pipeline_len { assert_ne!( epoched_validator_set_pre[epoch as usize], @@ -280,59 +276,16 @@ mod tests { } } - { - let epoch = pos_params.unbonding_len + 1; - let expected_stake = - initial_stake.change() - unbond.amount.change(); - assert_eq!( - read_validator_stake( - ctx(), - &pos_params, - 
&unbond.validator, - Epoch(epoch) - )? - .map(|v| v.change()), - Some(expected_stake), - "The total deltas at after the unbonding offset epoch must be \ - decremented by the unbonded amount - checking in epoch: \ - {epoch}" - ); - assert_eq!( - read_total_stake(ctx(), &pos_params, Epoch(epoch))?.change(), - expected_stake, - "The total deltas at after the unbonding offset epoch must be \ - decremented by the unbonded amount - checking in epoch: \ - {epoch}" - ); - } - - // - `#{staking_token}/balance/#{PoS}` // Check that PoS account balance is unchanged by unbond let pos_balance_post: token::Amount = ctx().read(&pos_balance_key)?.unwrap(); assert_eq!( pos_balance_pre, pos_balance_post, - "Unbonding doesn't affect PoS system balance" + "Unbonding should not affect PoS system balance" ); - // - `#{PoS}/unbond/#{owner}/#{validator}` // Check that the unbond doesn't exist until unbonding offset - - // Outer epoch is end (withdrawable), inner epoch is beginning of let unbond_handle = unbond_handle(&unbond_src, &unbond.validator); - - // let unbonds_post = ctx().read_unbond(&unbond_id)?.unwrap(); - // let bonds_post = ctx().read_bond(&unbond_id)?.unwrap(); - - for epoch in 0..(pos_params.pipeline_len + pos_params.unbonding_len) { - let unbond = unbond_handle.at(&Epoch(epoch)); - - assert!( - unbond.is_empty(ctx())?, - "There should be no unbond until unbonding offset - checking \ - epoch {epoch}" - ); - } let start_epoch = if is_delegation { // This bond was a delegation Epoch::from(pos_params.pipeline_len) @@ -340,62 +293,42 @@ mod tests { // This bond was a genesis validator self-bond Epoch::default() }; - // let end_epoch = Epoch::from(pos_params.unbonding_len - 1); - - // let expected_unbond = if unbond.amount == token::Amount::default() { - // HashMap::new() - // } else { - // HashMap::from_iter([((start_epoch, end_epoch), unbond.amount)]) - // }; + let withdrawable_epoch = pos_params.withdrawable_epoch_offset(); + for epoch in 0..withdrawable_epoch { + assert!( + 
unbond_handle + .at(&start_epoch) + .get(ctx(), &Epoch(epoch))? + .is_none(), + "There should be no unbond until the withdrawable offset - \ + checking epoch {epoch}" + ); + } // Ensure that the unbond is structured as expected, withdrawable at // pipeline + unbonding + cubic_slash_window offsets let actual_unbond_amount = unbond_handle - .at(&Epoch::from( - pos_params.pipeline_len - + pos_params.unbonding_len - + pos_params.cubic_slashing_window_length, - )) - .get(ctx(), &start_epoch)?; + .at(&start_epoch) + .get(ctx(), &Epoch(withdrawable_epoch))?; assert_eq!( actual_unbond_amount, Some(unbond.amount), - "Delegation at pipeline + unbonding offset should be equal to the \ - unbonded amount" + "Delegation at pipeline + unbonding + cubic window offset should \ + be equal to the unbonded amount" ); - for epoch in start_epoch.0 - ..(pos_params.pipeline_len - + pos_params.unbonding_len - + pos_params.cubic_slashing_window_length) - { + for epoch in start_epoch.0..pos_params.withdrawable_epoch_offset() { let bond_amount = bond_handle.get_sum(ctx(), Epoch(epoch), &pos_params)?; let expected_amount = initial_stake - unbond.amount; assert_eq!( bond_amount, - Some(expected_amount.change()), + Some(expected_amount), "After the tx is applied, the bond should be changed in \ place, checking epoch {epoch}" ); } - // { - // let epoch = pos_params.unbonding_len + 1; - // let bond: Bond = bonds_post.get(epoch).unwrap(); - // let expected_bond = - // HashMap::from_iter([(start_epoch, initial_stake)]); - // assert_eq!( - // bond.pos_deltas, expected_bond, - // "At unbonding offset, the pos deltas should not change, \ - // checking epoch {epoch}" - // ); - // assert_eq!( - // bond.neg_deltas, unbond.amount, - // "At unbonding offset, the unbonded amount should have been \ - // deducted, checking epoch {epoch}" - // ) - // } // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); @@ -410,6 +343,8 @@ mod tests { Ok(()) } + /// Generates an initial validator stake and a 
unbond, while making sure + /// that the `initial_stake >= unbond.amount`. fn arb_initial_stake_and_unbond() -> impl Strategy { // Generate initial stake @@ -424,8 +359,7 @@ mod tests { ) } - /// Generates an initial validator stake and a unbond, while making sure - /// that the `initial_stake >= unbond.amount`. + /// Generates an arbitrary unbond, with the amount constrained from above. fn arb_unbond( max_amount: u64, ) -> impl Strategy { diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index c8fa649c43..f8e804ef6f 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -12,7 +12,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let slashed = ctx.withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator)?; - if slashed != token::Amount::default() { + if !slashed.is_zero() { debug_log!("New withdrawal slashed for {}", slashed.to_string_native()); } Ok(()) @@ -20,7 +20,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{PosParams, PosVP}; + use namada::proof_of_stake::types::GenesisValidator; use namada::proof_of_stake::unbond_handle; use namada::types::dec::Dec; use namada::types::storage::Epoch; @@ -71,7 +72,7 @@ mod tests { ) -> TxResult { // Remove the validator stake threshold for simplicity let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + validator_stake_threshold: token::Amount::zero(), ..pos_params }; @@ -89,7 +90,7 @@ mod tests { // If we're withdrawing a delegation, we'll give the initial // stake to the delegation instead of the // validator - token::Amount::default() + token::Amount::zero() } else { initial_stake }, @@ -193,7 +194,7 @@ mod tests { let handle = unbond_handle(&unbond_src, &withdraw.validator); let unbond_pre = - handle.at(&withdraw_epoch).get(ctx(), &bond_epoch).unwrap(); + 
handle.at(&bond_epoch).get(ctx(), &withdraw_epoch).unwrap(); assert_eq!(unbond_pre, Some(unbonded_amount)); From 2e407e44c53f1277dfa7f2a227f6bab57447c64e Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 4 Oct 2023 18:46:46 +0200 Subject: [PATCH 107/161] Replay protection column family and related methods in `DB` trait and `Storage` --- apps/src/lib/node/ledger/storage/rocksdb.rs | 106 ++++++++++++++++++++ core/src/ledger/storage/mockdb.rs | 59 +++++++++++ core/src/ledger/storage/mod.rs | 41 ++++++++ 3 files changed, 206 insertions(+) diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 61eb2c32e5..2af790c339 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -12,6 +12,7 @@ //! epoch can start //! - `next_epoch_min_start_time`: minimum block time from which the next //! epoch can start +//! - `replay_protection`: hashes of the processed transactions //! - `pred`: predecessor values of the top-level keys of the same name //! - `tx_queue` //! - `next_epoch_min_start_height` @@ -32,6 +33,9 @@ //! - `epoch`: block epoch //! - `address_gen`: established address generator //! - `header`: block's header +//! - `replay_protection`: hashes of processed tx +//! - `all`: the hashes included up to the last block +//! 
- `last`: the hashes included in the last block use std::fs::File; use std::io::BufWriter; @@ -73,6 +77,7 @@ const SUBSPACE_CF: &str = "subspace"; const DIFFS_CF: &str = "diffs"; const STATE_CF: &str = "state"; const BLOCK_CF: &str = "block"; +const REPLAY_PROTECTION_CF: &str = "replay_protection"; /// RocksDB handle #[derive(Debug)] @@ -160,6 +165,22 @@ pub fn open( block_cf_opts.set_block_based_table_factory(&table_opts); cfs.push(ColumnFamilyDescriptor::new(BLOCK_CF, block_cf_opts)); + // for replay protection (read/insert-intensive) + let mut replay_protection_cf_opts = Options::default(); + replay_protection_cf_opts + .set_compression_type(rocksdb::DBCompressionType::Zstd); + replay_protection_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); //FIXME :review these values + replay_protection_cf_opts.set_level_compaction_dynamic_level_bytes(true); + // Prioritize minimizing read amplification + //FIXME: well in theory I never update keys, so probably I can never incour in read amplification (but probably not even in write aplification) + replay_protection_cf_opts + .set_compaction_style(rocksdb::DBCompactionStyle::Level); + replay_protection_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new( + REPLAY_PROTECTION_CF, + replay_protection_cf_opts, + )); + rocksdb::DB::open_cf_descriptors(&db_opts, path, cfs) .map(RocksDB) .map_err(|e| Error::DBError(e.into_string())) @@ -305,6 +326,7 @@ impl RocksDB { self.dump_it(cf, Some(prefix.clone()), &mut file); // Block + //FIXME: shouldn't this be dumped even if not historic? let cf = self .get_column_family(BLOCK_CF) .expect("Block column family should exist"); @@ -353,6 +375,22 @@ impl RocksDB { self.dump_it(cf, None, &mut file); } + // replay protection + // Dump of replay protection keys is possible only at the last height or the previous one + if height == last_height { + //FIXME: review this (really need to dump replay prot? REally need the all prefix?) 
+ + let cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("Replay protection column family should exist"); + self.dump_it(cf, None, &mut file); + } else if height == last_height - 1 { + let cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("Replay protection column family should exist"); + self.dump_it(cf, Some("all".to_string()), &mut file); + } + println!("Done writing to {}", full_path.to_string_lossy()); } @@ -449,6 +487,11 @@ impl RocksDB { tracing::info!("Removing last block results"); batch.delete_cf(block_cf, format!("results/{}", last_block.height)); + // Delete the tx hashes included in the last block + let reprot_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; + tracing::info!("Removing replay protection hashes"); + batch.delete_cf(reprot_cf, "last".to_string()); + // Execute next step in parallel let batch = Mutex::new(batch); @@ -1055,6 +1098,29 @@ impl DB for RocksDB { Ok(Some((stored_height, merkle_tree_stores))) } + fn has_replay_protection_entry( + &self, + hash: &namada::types::hash::Hash, + ) -> Result { + let replay_protection_cf = + self.get_column_family(REPLAY_PROTECTION_CF)?; + + for prefix in ["last", "all"] { + let key = Key::parse(prefix) + .map_err(Error::KeyError)? + .push(&hash.to_string()) + .map_err(Error::KeyError)?; + if let Some(_) = self + .0 + .get_cf(replay_protection_cf, key.to_string()) + .map_err(|e| Error::DBError(e.into_string()))? + { + return Ok(true); + } + } + Ok(false) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; self.0 @@ -1341,6 +1407,46 @@ impl DB for RocksDB { None => Ok(()), } } + + fn write_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + hash: &namada::types::hash::Hash, + ) -> Result<()> { + let replay_protection_cf = + self.get_column_family(REPLAY_PROTECTION_CF)?; + + let key = Key::parse("last") + .map_err(Error::KeyError)? 
+ .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + batch + .0 + .put_cf(replay_protection_cf, key.to_string(), vec![]); + + Ok(()) + } + + fn delete_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + hash: &namada::types::hash::Hash, + ) -> Result<()> { + let replay_protection_cf = + self.get_column_family(REPLAY_PROTECTION_CF)?; + + for prefix in ["last", "all"] { + let key = Key::parse(prefix) + .map_err(Error::KeyError)? + .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + batch.0.delete_cf(replay_protection_cf, key.to_string()) + } + + Ok(()) + } } impl<'iter> DBIter<'iter> for RocksDB { diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index 971584e742..52ddc51f14 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -14,6 +14,7 @@ use super::{ }; use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; use crate::types::ethereum_structs; +use crate::types::hash::Hash; #[cfg(feature = "ferveo-tpke")] use crate::types::internal::TxQueue; use crate::types::storage::{ @@ -413,6 +414,25 @@ impl DB for MockDB { Ok(Some((height, merkle_tree_stores))) } + //FIXME: I should dump the content of the last subey into all at the beginning of finalize block before writing any new hash into last. Do I need another function in this trait for that? + fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + let prefix_key = + Key::parse("replay_protection").map_err(Error::KeyError)?; + for prefix in ["last", "all"] { + let key = prefix_key + .push(&prefix.to_string()) + .map_err(Error::KeyError)? 
+ .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + if self.0.borrow().contains_key(&key.to_string()) { + return Ok(true); + } + } + + Ok(false) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let key = Key::parse("subspace").map_err(Error::KeyError)?.join(key); Ok(self.0.borrow().get(&key.to_string()).cloned()) @@ -540,6 +560,45 @@ impl DB for MockDB { None => Ok(()), } } + + fn write_replay_protection_entry( + &mut self, + _batch: &mut Self::WriteBatch, + hash: &Hash, + ) -> Result<()> { + let key = Key::parse("replay_protection") + .map_err(Error::KeyError)? + .push(&"last".to_string()) + .map_err(Error::KeyError)? + .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + match self.0.borrow_mut().insert(key.to_string(), vec![]) { + Some(_) => Err(Error::DBError(format!( + "Replay protection key {key} already in storage" + ))), + None => Ok(()), + } + } + + fn delete_replay_protection_entry( + &mut self, + _batch: &mut Self::WriteBatch, + hash: &Hash, + ) -> Result<()> { + let key = Key::parse("replay_protection").map_err(Error::KeyError)?; + for prefix in ["last", "all"] { + let key = key + .push(&prefix.to_string()) + .map_err(Error::KeyError)? 
+ .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + self.0.borrow_mut().remove(&key.to_string()); + } + + Ok(()) + } } impl<'iter> DBIter<'iter> for MockDB { diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 81be7e48a6..19f6fb3121 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -286,6 +286,9 @@ pub trait DB: std::fmt::Debug { height: BlockHeight, ) -> Result>; + /// Check if the given replay protection entry exists + fn has_replay_protection_entry(&self, hash: &Hash) -> Result; + /// Read the latest value for account subspace key from the DB fn read_subspace_val(&self, key: &Key) -> Result>>; @@ -353,6 +356,20 @@ pub trait DB: std::fmt::Debug { pruned_epoch: Epoch, pred_epochs: &Epochs, ) -> Result<()>; + + /// Write a replay protection entry + fn write_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + hash: &Hash, + ) -> Result<()>; + + /// Delete a replay protection entry + fn delete_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + hash: &Hash, + ) -> Result<()>; } /// A database prefix iterator. @@ -570,6 +587,7 @@ where /// Check if the given key is present in storage. Returns the result and the /// gas cost. 
pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + //FIXME: remove all the ifs for replya protection keys if is_replay_protection_key(key) { // Replay protection keys are not included in the merkle // tree @@ -1121,6 +1139,29 @@ where .map(|b| b.height) .unwrap_or_default() } + + /// Check it the given transaction's hash is already present in storage + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + self.db.has_replay_protection_entry(hash) + } + + /// Write the provided tx hash to storage + pub fn write_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + hash: &Hash, + ) -> Result<()> { + self.db.write_replay_protection_entry(batch, hash) + } + + /// Delete the provided tx hash from storage + pub fn delete_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + hash: &Hash, + ) -> Result<()> { + self.db.delete_replay_protection_entry(batch, hash) + } } impl From for Error { From 58b424edaddfa776dabb0c597736750eacf63087 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 5 Oct 2023 18:03:35 +0200 Subject: [PATCH 108/161] Writes replay protection keys under separate storage root --- .../lib/node/ledger/shell/finalize_block.rs | 31 ++++++----- apps/src/lib/node/ledger/shell/mod.rs | 24 +++++---- .../lib/node/ledger/shell/prepare_proposal.rs | 7 +-- .../lib/node/ledger/shell/process_proposal.rs | 7 +-- apps/src/lib/node/ledger/storage/rocksdb.rs | 17 +++--- core/src/ledger/replay_protection.rs | 29 ++++++++--- core/src/ledger/storage/mod.rs | 2 +- core/src/ledger/storage/wl_storage.rs | 19 +++++++ core/src/ledger/storage/write_log.rs | 52 ++++++++++++++++--- shared/src/ledger/protocol/mod.rs | 11 ++-- 10 files changed, 139 insertions(+), 60 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..196895b5e1 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ 
b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -217,7 +217,9 @@ where .update_header(TxType::Raw) .header_hash(); let tx_hash_key = - replay_protection::get_replay_protection_key(&tx_hash); + replay_protection::get_replay_protection_last_key( + &tx_hash, + ); self.wl_storage .delete(&tx_hash_key) .expect("Error while deleting tx hash from storage"); @@ -516,7 +518,7 @@ where msg { let tx_hash_key = - replay_protection::get_replay_protection_key( + replay_protection::get_replay_protection_last_key( &hash, ); self.wl_storage.delete(&tx_hash_key).expect( @@ -2269,15 +2271,17 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper_tx.header_hash(), - ); + let wrapper_hash_key = + replay_protection::get_replay_protection_last_key( + &wrapper_tx.header_hash(), + ); let mut decrypted_tx = wrapper_tx; decrypted_tx.update_header(TxType::Raw); - let decrypted_hash_key = replay_protection::get_replay_protection_key( - &decrypted_tx.header_hash(), - ); + let decrypted_hash_key = + replay_protection::get_replay_protection_last_key( + &decrypted_tx.header_hash(), + ); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); @@ -2361,7 +2365,7 @@ mod test_finalize_block { decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // Write inner hash in storage - let inner_hash_key = replay_protection::get_replay_protection_key( + let inner_hash_key = replay_protection::get_replay_protection_last_key( &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), ); shell @@ -2435,10 +2439,11 @@ mod test_finalize_block { None, ))); - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper.header_hash(), - ); - let inner_hash_key = replay_protection::get_replay_protection_key( + let wrapper_hash_key = + 
replay_protection::get_replay_protection_last_key( + &wrapper.header_hash(), + ); + let inner_hash_key = replay_protection::get_replay_protection_last_key( &wrapper.clone().update_header(TxType::Raw).header_hash(), ); diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..de72c1a81a 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -934,10 +934,8 @@ where ) -> Result<()> { let inner_tx_hash = wrapper.clone().update_header(TxType::Raw).header_hash(); - let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); if temp_wl_storage - .has_key(&inner_hash_key) + .has_replay_protection_entry(&inner_tx_hash) .expect("Error while checking inner tx hash key in storage") { return Err(Error::ReplayAttempt(format!( @@ -947,6 +945,8 @@ where } // Write inner hash to tx WAL + let inner_hash_key = + replay_protection::get_replay_protection_last_key(&inner_tx_hash); temp_wl_storage .write_log .write(&inner_hash_key, vec![]) @@ -955,10 +955,8 @@ where let tx = Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); let wrapper_hash = tx.header_hash(); - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); if temp_wl_storage - .has_key(&wrapper_hash_key) + .has_replay_protection_entry(&wrapper_hash) .expect("Error while checking wrapper tx hash key in storage") { return Err(Error::ReplayAttempt(format!( @@ -968,6 +966,8 @@ where } // Write wrapper hash to tx WAL + let wrapper_hash_key = + replay_protection::get_replay_protection_last_key(&wrapper_hash); temp_wl_storage .write_log .write(&wrapper_hash_key, vec![]) @@ -1267,7 +1267,9 @@ where inner_tx.update_header(TxType::Raw); let inner_tx_hash = &inner_tx.header_hash(); let inner_hash_key = - replay_protection::get_replay_protection_key(inner_tx_hash); + replay_protection::get_replay_protection_last_key( + inner_tx_hash, + ); if self .wl_storage .storage @@ -1288,7 
+1290,9 @@ where .expect("Deserialization shouldn't fail"); let wrapper_hash = hash::Hash(tx.header_hash().0); let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + replay_protection::get_replay_protection_last_key( + &wrapper_hash, + ); if self .wl_storage .storage @@ -2466,7 +2470,7 @@ mod tests { // Write wrapper hash to storage let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + replay_protection::get_replay_protection_last_key(&wrapper_hash); shell .wl_storage .storage @@ -2506,7 +2510,7 @@ mod tests { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash in storage let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); + replay_protection::get_replay_protection_last_key(&inner_tx_hash); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..103c90b982 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1188,7 +1188,7 @@ mod test_prepare_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_key( &wrapper_unsigned_hash, ); shell @@ -1283,8 +1283,9 @@ mod test_prepare_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let hash_key = replay_protection::get_replay_protection_last_key( + &inner_unsigned_hash, + ); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..fbbb03ab81 100644 --- 
a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2132,7 +2132,7 @@ mod test_process_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_key( &wrapper_unsigned_hash, ); shell @@ -2267,8 +2267,9 @@ mod test_process_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let hash_key = replay_protection::get_replay_protection_last_key( + &inner_unsigned_hash, + ); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 2af790c339..b0e1dda7ea 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -169,10 +169,9 @@ pub fn open( let mut replay_protection_cf_opts = Options::default(); replay_protection_cf_opts .set_compression_type(rocksdb::DBCompressionType::Zstd); - replay_protection_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); //FIXME :review these values + replay_protection_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); replay_protection_cf_opts.set_level_compaction_dynamic_level_bytes(true); // Prioritize minimizing read amplification - //FIXME: well in theory I never update keys, so probably I can never incour in read amplification (but probably not even in write aplification) replay_protection_cf_opts .set_compaction_style(rocksdb::DBCompactionStyle::Level); replay_protection_cf_opts.set_block_based_table_factory(&table_opts); @@ -326,7 +325,6 @@ impl RocksDB { self.dump_it(cf, Some(prefix.clone()), &mut file); // Block - //FIXME: shouldn't this be dumped even if not historic? 
let cf = self .get_column_family(BLOCK_CF) .expect("Block column family should exist"); @@ -376,10 +374,9 @@ impl RocksDB { } // replay protection - // Dump of replay protection keys is possible only at the last height or the previous one + // Dump of replay protection keys is possible only at the last height or + // the previous one if height == last_height { - //FIXME: review this (really need to dump replay prot? REally need the all prefix?) - let cf = self .get_column_family(REPLAY_PROTECTION_CF) .expect("Replay protection column family should exist"); @@ -490,7 +487,7 @@ impl RocksDB { // Delete the tx hashes included in the last block let reprot_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; tracing::info!("Removing replay protection hashes"); - batch.delete_cf(reprot_cf, "last".to_string()); + batch.delete_cf(reprot_cf, "last"); // Execute next step in parallel let batch = Mutex::new(batch); @@ -1110,10 +1107,11 @@ impl DB for RocksDB { .map_err(Error::KeyError)? .push(&hash.to_string()) .map_err(Error::KeyError)?; - if let Some(_) = self + if self .0 - .get_cf(replay_protection_cf, key.to_string()) + .get_pinned_cf(replay_protection_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))? + .is_some() { return Ok(true); } @@ -1420,7 +1418,6 @@ impl DB for RocksDB { .map_err(Error::KeyError)? .push(&hash.to_string()) .map_err(Error::KeyError)?; - batch .0 .put_cf(replay_protection_cf, key.to_string(), vec![]); diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index 56537dfbaf..a9890e4587 100644 --- a/core/src/ledger/replay_protection.rs +++ b/core/src/ledger/replay_protection.rs @@ -1,21 +1,34 @@ //! 
Replay protection storage -use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; use crate::types::storage::{DbKeySeg, Key, KeySeg}; -/// Internal replay protection address -pub const ADDRESS: Address = - Address::Internal(InternalAddress::ReplayProtection); +/// Replay protection storage root +const STORAGE_ROOT: &str = "replay_protection"; +// FIXME: remove the replay protection VP /// Check if a key is a replay protection key pub fn is_replay_protection_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) + matches!(&key.segments[0], DbKeySeg::StringSeg(root) if root == STORAGE_ROOT) } -/// Get the transaction hash key -pub fn get_replay_protection_key(hash: &Hash) -> Key { - Key::from(ADDRESS.to_db_key()) +/// Get the transaction hash key under the last subkey +pub fn get_replay_protection_last_key(hash: &Hash) -> Key { + Key::parse(STORAGE_ROOT) + .expect("Cannot obtain a valid db key") + .push(&"last".to_string()) + .expect("Cannot obtain a valid db key") + .push(&hash.to_string()) + .expect("Cannot obtain a valid db key") +} + +/// Get the transaction hash key under the all subkey +// FIXME: need this? If not remvoe and rename the previous one removeing "last" +pub fn get_replay_protection_all_key(hash: &Hash) -> Key { + Key::parse(STORAGE_ROOT) + .expect("Cannot obtain a valid db key") + .push(&"all".to_string()) + .expect("Cannot obtain a valid db key") .push(&hash.to_string()) .expect("Cannot obtain a valid db key") } diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 19f6fb3121..5f0a3396ba 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -587,7 +587,7 @@ where /// Check if the given key is present in storage. Returns the result and the /// gas cost. 
pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { - //FIXME: remove all the ifs for replya protection keys + // FIXME: remove all the ifs for replya protection keys if is_replay_protection_key(key) { // Replay protection keys are not included in the merkle // tree diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 87107a35c9..011336a9db 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -9,6 +9,7 @@ use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; use crate::ledger::{gas, parameters, storage_api}; use crate::types::address::Address; +use crate::types::hash::Hash; use crate::types::storage::{self, BlockHeight}; use crate::types::time::DateTimeUtc; @@ -55,6 +56,24 @@ where storage, } } + + /// Check if the given tx hash is present + pub fn has_replay_protection_entry( + &self, + hash: &Hash, + ) -> Result { + let key = + crate::ledger::replay_protection::get_replay_protection_last_key( + hash, + ); + if let Some(write_log::StorageModification::Write { .. 
}) = + self.write_log.read(&key).0 + { + return Ok(true); + } + + self.storage.has_replay_protection_entry(hash) + } } /// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 84edb8f56d..e209402c64 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -10,12 +10,14 @@ use crate::ledger; use crate::ledger::gas::{ STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, }; +use crate::ledger::replay_protection::is_replay_protection_key; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::Storage; use crate::types::address::{Address, EstablishedAddressGen, InternalAddress}; use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; use crate::types::storage; +use crate::types::storage::KeySeg; use crate::types::token::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, }; @@ -490,17 +492,54 @@ impl WriteLog { + for<'iter> ledger::storage::DBIter<'iter>, H: StorageHasher, { + // FIXME: maybe better to use two new fields in the write log for replay + // protection keys? + let _iter = self.block_write_log.iter(); for (key, entry) in self.block_write_log.iter() { match entry { StorageModification::Write { value } => { - storage - .batch_write_subspace_val(batch, key, value.clone()) - .map_err(Error::StorageError)?; + if is_replay_protection_key(key) { + let hash = key + .last() + .ok_or(Error::StorageError( + ledger::storage::Error::KeyError( + crate::types::storage::Error::EmptyKey, + ), + ))? 
+ .raw() + .parse() + .map_err(|_e| Error::StorageError(ledger::storage::Error::KeyError(crate::types::storage::Error::InvalidKeySeg("Expected valid hash".to_string()))))?; + + storage + .write_replay_protection_entry(batch, &hash) + .map_err(Error::StorageError)?; + } else { + storage + .batch_write_subspace_val(batch, key, value.clone()) + .map_err(Error::StorageError)?; + } } StorageModification::Delete => { - storage - .batch_delete_subspace_val(batch, key) - .map_err(Error::StorageError)?; + if is_replay_protection_key(key) { + let hash = key + .last() + .ok_or(Error::StorageError( + ledger::storage::Error::KeyError( + crate::types::storage::Error::EmptyKey, + ), + ))? + .raw() + .parse() + .map_err(|_e| Error::StorageError(ledger::storage::Error::KeyError(crate::types::storage::Error::InvalidKeySeg("Expected valid hash".to_string()))))?; + + storage + .delete_replay_protection_entry(batch, &hash) + .map_err(Error::StorageError)?; + } else { + storage + .batch_delete_subspace_val(batch, key) + .map_err(Error::StorageError)?; + } } StorageModification::InitAccount { vp_code_hash } => { storage @@ -511,6 +550,7 @@ impl WriteLog { StorageModification::Temp { .. 
} => {} } } + if let Some(address_gen) = self.address_gen.take() { storage.address_gen = address_gen } diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index a23b026eea..56b777611b 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -33,10 +33,10 @@ use crate::ledger::storage::{DBIter, Storage, StorageHasher, WlStorage, DB}; use crate::ledger::{replay_protection, storage_api}; use crate::proto::{self, Tx}; use crate::types::address::{Address, InternalAddress}; +use crate::types::storage; use crate::types::storage::TxIndex; use crate::types::transaction::protocol::{EthereumTxData, ProtocolTxType}; use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; -use crate::types::{hash, storage}; use crate::vm::wasm::{TxCache, VpCache}; use crate::vm::{self, wasm, WasmCacheAccess}; @@ -235,9 +235,8 @@ where // Writes wrapper tx hash to block write log (changes must be persisted even // in case of failure) - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &hash::Hash(tx.header_hash().0), - ); + let wrapper_hash_key = + replay_protection::get_replay_protection_last_key(&tx.header_hash()); shell_params .wl_storage .write(&wrapper_hash_key, ()) @@ -257,8 +256,8 @@ where shell_params.tx_gas_meter.add_tx_size_gas(tx_bytes)?; // If wrapper was succesful, write inner tx hash to storage - let inner_hash_key = replay_protection::get_replay_protection_key( - &hash::Hash(tx.update_header(TxType::Raw).header_hash().0), + let inner_hash_key = replay_protection::get_replay_protection_last_key( + &tx.update_header(TxType::Raw).header_hash(), ); shell_params .wl_storage From 935e55f6f3f83eea7733e7667967985ae363c138 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 5 Oct 2023 18:38:09 +0200 Subject: [PATCH 109/161] Removes hacky solution for replay protection merkle tree --- core/src/ledger/storage/mod.rs | 113 +++++++++++++-------------------- 1 file changed, 43 
insertions(+), 70 deletions(-) diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 5f0a3396ba..2adc4caaab 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -27,7 +27,6 @@ pub use wl_storage::{ #[cfg(feature = "wasm-runtime")] pub use self::masp_conversions::update_allowed_conversions; pub use self::masp_conversions::{encode_asset_type, ConversionState}; -use super::replay_protection::is_replay_protection_key; use crate::ledger::eth_bridge::storage::bridge_pool::is_pending_transfer_key; use crate::ledger::gas::{ STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, @@ -587,20 +586,10 @@ where /// Check if the given key is present in storage. Returns the result and the /// gas cost. pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { - // FIXME: remove all the ifs for replya protection keys - if is_replay_protection_key(key) { - // Replay protection keys are not included in the merkle - // tree - Ok(( - self.db.read_subspace_val(key)?.is_some(), - key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, - )) - } else { - Ok(( - self.block.tree.has_key(key)?, - key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, - )) - } + Ok(( + self.block.tree.has_key(key)?, + key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, + )) } /// Returns a value from the specified subspace and the gas cost @@ -685,8 +674,8 @@ where let height = self.block.height.try_to_vec().expect("Encoding failed"); self.block.tree.update(key, height)?; - } else if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries + } else { + // Update the merkle tree self.block.tree.update(key, value)?; } @@ -704,9 +693,7 @@ where // but with gas and storage bytes len diff accounting let mut deleted_bytes_len = 0; if self.has_key(key)?.0 { - if !is_replay_protection_key(key) { - self.block.tree.delete(key)?; - } + self.block.tree.delete(key)?; deleted_bytes_len = self.db.delete_subspace_val(self.block.height, key)?; 
} @@ -815,44 +802,36 @@ where match old.0.cmp(&new.0) { Ordering::Equal => { // the value was updated - if !is_replay_protection_key(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) - } else { - new.1.clone() - }, - )? - }; + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.try_to_vec().expect( + "Serialization should never fail", + ) + } else { + new.1.clone() + }, + )?; old_diff = old_diff_iter.next(); new_diff = new_diff_iter.next(); } Ordering::Less => { // the value was deleted - if !is_replay_protection_key(&old_key) { - tree.delete(&old_key)?; - } + tree.delete(&old_key)?; old_diff = old_diff_iter.next(); } Ordering::Greater => { // the value was inserted - if !is_replay_protection_key(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) - } else { - new.1.clone() - }, - )?; - } + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.try_to_vec().expect( + "Serialization should never fail", + ) + } else { + new.1.clone() + }, + )?; new_diff = new_diff_iter.next(); } } @@ -861,9 +840,7 @@ where // the value was deleted let key = Key::parse(old.0.clone()) .expect("the key should be parsable"); - if !is_replay_protection_key(&key) { - tree.delete(&key)?; - } + tree.delete(&key)?; old_diff = old_diff_iter.next(); } (None, Some(new)) => { @@ -871,18 +848,16 @@ where let key = Key::parse(new.0.clone()) .expect("the key should be parsable"); - if !is_replay_protection_key(&key) { - tree.update( - &key, - if is_pending_transfer_key(&key) { - target_height.try_to_vec().expect( - "Serialization should never fail", - ) - } else { - new.1.clone() - }, - )? 
- }; + tree.update( + &key, + if is_pending_transfer_key(&key) { + target_height + .try_to_vec() + .expect("Serialization should never fail") + } else { + new.1.clone() + }, + )?; new_diff = new_diff_iter.next(); } (None, None) => break, @@ -1076,8 +1051,8 @@ where let height = self.block.height.try_to_vec().expect("Encoding failed"); self.block.tree.update(key, height)?; - } else if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries + } else { + // Update the merkle tree self.block.tree.update(key, value)?; } self.db @@ -1092,10 +1067,8 @@ where batch: &mut D::WriteBatch, key: &Key, ) -> Result { - if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries - self.block.tree.delete(key)?; - } + // Update the merkle tree + self.block.tree.delete(key)?; self.db .batch_delete_subspace_val(batch, self.block.height, key) } From 61724b4deb2a308c561f7e654da68a8b6416fe64 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 5 Oct 2023 18:45:38 +0200 Subject: [PATCH 110/161] Renames replay protection storage key getter --- .../lib/node/ledger/shell/finalize_block.rs | 31 ++++++++----------- apps/src/lib/node/ledger/shell/mod.rs | 16 ++++------ .../lib/node/ledger/shell/prepare_proposal.rs | 7 ++--- .../lib/node/ledger/shell/process_proposal.rs | 7 ++--- core/src/ledger/replay_protection.rs | 13 +------- core/src/ledger/storage/wl_storage.rs | 4 +-- shared/src/ledger/protocol/mod.rs | 4 +-- 7 files changed, 29 insertions(+), 53 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 196895b5e1..53252065f1 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -217,9 +217,7 @@ where .update_header(TxType::Raw) .header_hash(); let tx_hash_key = - replay_protection::get_replay_protection_last_key( - &tx_hash, - ); + 
replay_protection::get_replay_protection_key(&tx_hash); self.wl_storage .delete(&tx_hash_key) .expect("Error while deleting tx hash from storage"); @@ -518,7 +516,7 @@ where msg { let tx_hash_key = - replay_protection::get_replay_protection_last_key( + replay_protection::get_replay_protection_key( &hash, ); self.wl_storage.delete(&tx_hash_key).expect( @@ -2271,17 +2269,15 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let wrapper_hash_key = - replay_protection::get_replay_protection_last_key( - &wrapper_tx.header_hash(), - ); + let wrapper_hash_key = replay_protection::get_replay_protection_key( + &wrapper_tx.header_hash(), + ); let mut decrypted_tx = wrapper_tx; decrypted_tx.update_header(TxType::Raw); - let decrypted_hash_key = - replay_protection::get_replay_protection_last_key( - &decrypted_tx.header_hash(), - ); + let decrypted_hash_key = replay_protection::get_replay_protection_key( + &decrypted_tx.header_hash(), + ); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); @@ -2365,7 +2361,7 @@ mod test_finalize_block { decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // Write inner hash in storage - let inner_hash_key = replay_protection::get_replay_protection_last_key( + let inner_hash_key = replay_protection::get_replay_protection_key( &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), ); shell @@ -2439,11 +2435,10 @@ mod test_finalize_block { None, ))); - let wrapper_hash_key = - replay_protection::get_replay_protection_last_key( - &wrapper.header_hash(), - ); - let inner_hash_key = replay_protection::get_replay_protection_last_key( + let wrapper_hash_key = replay_protection::get_replay_protection_key( + &wrapper.header_hash(), + ); + let inner_hash_key = replay_protection::get_replay_protection_key( &wrapper.clone().update_header(TxType::Raw).header_hash(), ); diff --git 
a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index de72c1a81a..6b59e96cec 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -946,7 +946,7 @@ where // Write inner hash to tx WAL let inner_hash_key = - replay_protection::get_replay_protection_last_key(&inner_tx_hash); + replay_protection::get_replay_protection_key(&inner_tx_hash); temp_wl_storage .write_log .write(&inner_hash_key, vec![]) @@ -967,7 +967,7 @@ where // Write wrapper hash to tx WAL let wrapper_hash_key = - replay_protection::get_replay_protection_last_key(&wrapper_hash); + replay_protection::get_replay_protection_key(&wrapper_hash); temp_wl_storage .write_log .write(&wrapper_hash_key, vec![]) @@ -1267,9 +1267,7 @@ where inner_tx.update_header(TxType::Raw); let inner_tx_hash = &inner_tx.header_hash(); let inner_hash_key = - replay_protection::get_replay_protection_last_key( - inner_tx_hash, - ); + replay_protection::get_replay_protection_key(inner_tx_hash); if self .wl_storage .storage @@ -1290,9 +1288,7 @@ where .expect("Deserialization shouldn't fail"); let wrapper_hash = hash::Hash(tx.header_hash().0); let wrapper_hash_key = - replay_protection::get_replay_protection_last_key( - &wrapper_hash, - ); + replay_protection::get_replay_protection_key(&wrapper_hash); if self .wl_storage .storage @@ -2470,7 +2466,7 @@ mod tests { // Write wrapper hash to storage let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = - replay_protection::get_replay_protection_last_key(&wrapper_hash); + replay_protection::get_replay_protection_key(&wrapper_hash); shell .wl_storage .storage @@ -2510,7 +2506,7 @@ mod tests { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash in storage let inner_hash_key = - replay_protection::get_replay_protection_last_key(&inner_tx_hash); + replay_protection::get_replay_protection_key(&inner_tx_hash); shell .wl_storage .storage diff --git 
a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 103c90b982..3687a6d39b 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1188,7 +1188,7 @@ mod test_prepare_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_last_key( + let hash_key = replay_protection::get_replay_protection_key( &wrapper_unsigned_hash, ); shell @@ -1283,9 +1283,8 @@ mod test_prepare_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = replay_protection::get_replay_protection_last_key( - &inner_unsigned_hash, - ); + let hash_key = + replay_protection::get_replay_protection_key(&inner_unsigned_hash); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index fbbb03ab81..ab544de3f8 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2132,7 +2132,7 @@ mod test_process_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_last_key( + let hash_key = replay_protection::get_replay_protection_key( &wrapper_unsigned_hash, ); shell @@ -2267,9 +2267,8 @@ mod test_process_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = replay_protection::get_replay_protection_last_key( - &inner_unsigned_hash, - ); + let hash_key = + replay_protection::get_replay_protection_key(&inner_unsigned_hash); shell .wl_storage .storage diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index a9890e4587..c44d76a2ba 100644 --- a/core/src/ledger/replay_protection.rs +++ 
b/core/src/ledger/replay_protection.rs @@ -13,7 +13,7 @@ pub fn is_replay_protection_key(key: &Key) -> bool { } /// Get the transaction hash key under the last subkey -pub fn get_replay_protection_last_key(hash: &Hash) -> Key { +pub fn get_replay_protection_key(hash: &Hash) -> Key { Key::parse(STORAGE_ROOT) .expect("Cannot obtain a valid db key") .push(&"last".to_string()) @@ -21,14 +21,3 @@ pub fn get_replay_protection_last_key(hash: &Hash) -> Key { .push(&hash.to_string()) .expect("Cannot obtain a valid db key") } - -/// Get the transaction hash key under the all subkey -// FIXME: need this? If not remvoe and rename the previous one removeing "last" -pub fn get_replay_protection_all_key(hash: &Hash) -> Key { - Key::parse(STORAGE_ROOT) - .expect("Cannot obtain a valid db key") - .push(&"all".to_string()) - .expect("Cannot obtain a valid db key") - .push(&hash.to_string()) - .expect("Cannot obtain a valid db key") -} diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 011336a9db..71914d519c 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -63,9 +63,7 @@ where hash: &Hash, ) -> Result { let key = - crate::ledger::replay_protection::get_replay_protection_last_key( - hash, - ); + crate::ledger::replay_protection::get_replay_protection_key(hash); if let Some(write_log::StorageModification::Write { .. 
}) = self.write_log.read(&key).0 { diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index 56b777611b..c2b354e944 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -236,7 +236,7 @@ where // Writes wrapper tx hash to block write log (changes must be persisted even // in case of failure) let wrapper_hash_key = - replay_protection::get_replay_protection_last_key(&tx.header_hash()); + replay_protection::get_replay_protection_key(&tx.header_hash()); shell_params .wl_storage .write(&wrapper_hash_key, ()) @@ -256,7 +256,7 @@ where shell_params.tx_gas_meter.add_tx_size_gas(tx_bytes)?; // If wrapper was succesful, write inner tx hash to storage - let inner_hash_key = replay_protection::get_replay_protection_last_key( + let inner_hash_key = replay_protection::get_replay_protection_key( &tx.update_header(TxType::Raw).header_hash(), ); shell_params From 7caac7ff5ead1163e96d95711c8cd759b9a8be80 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 5 Oct 2023 18:55:07 +0200 Subject: [PATCH 111/161] Removes replay protection internal address and vp --- benches/native_vps.rs | 44 --------- core/src/ledger/replay_protection.rs | 1 - core/src/types/address.rs | 13 ----- shared/src/ledger/native_vp/mod.rs | 1 - .../src/ledger/native_vp/replay_protection.rs | 53 ------------------- shared/src/ledger/protocol/mod.rs | 15 ------ 6 files changed, 127 deletions(-) delete mode 100644 shared/src/ledger/native_vp/replay_protection.rs diff --git a/benches/native_vps.rs b/benches/native_vps.rs index 77373080c4..763c5f8dc7 100644 --- a/benches/native_vps.rs +++ b/benches/native_vps.rs @@ -23,7 +23,6 @@ use namada::ledger::gas::{TxGasMeter, VpGasMeter}; use namada::ledger::governance::GovernanceVp; use namada::ledger::native_vp::ibc::Ibc; use namada::ledger::native_vp::multitoken::MultitokenVp; -use namada::ledger::native_vp::replay_protection::ReplayProtectionVp; use namada::ledger::native_vp::{Ctx, NativeVp}; use 
namada::ledger::storage_api::StorageRead; use namada::proto::{Code, Section}; @@ -39,48 +38,6 @@ use namada_benches::{ TX_TRANSFER_WASM, TX_VOTE_PROPOSAL_WASM, }; -fn replay_protection(c: &mut Criterion) { - // Write a random key under the replay protection subspace - let tx = generate_foreign_key_tx(&defaults::albert_keypair()); - let mut shell = BenchShell::default(); - - shell.execute_tx(&tx); - let (verifiers, keys_changed) = shell - .wl_storage - .write_log - .verifiers_and_changed_keys(&BTreeSet::default()); - - let replay_protection = ReplayProtectionVp { - ctx: Ctx::new( - &Address::Internal(InternalAddress::ReplayProtection), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &tx, - &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), - &keys_changed, - &verifiers, - shell.vp_wasm_cache.clone(), - ), - }; - - c.bench_function("vp_replay_protection", |b| { - b.iter(|| { - // NOTE: thiv VP will always fail when triggered so don't assert - // here - replay_protection - .validate_tx( - &tx, - replay_protection.ctx.keys_changed, - replay_protection.ctx.verifiers, - ) - .unwrap() - }) - }); -} - fn governance(c: &mut Criterion) { let mut group = c.benchmark_group("vp_governance"); @@ -476,7 +433,6 @@ fn vp_multitoken(c: &mut Criterion) { criterion_group!( native_vps, - replay_protection, governance, // slash_fund, ibc, diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index c44d76a2ba..11f51ddda6 100644 --- a/core/src/ledger/replay_protection.rs +++ b/core/src/ledger/replay_protection.rs @@ -6,7 +6,6 @@ use crate::types::storage::{DbKeySeg, Key, KeySeg}; /// Replay protection storage root const STORAGE_ROOT: &str = "replay_protection"; -// FIXME: remove the replay protection VP /// Check if a key is a replay protection key pub fn is_replay_protection_key(key: &Key) -> bool { matches!(&key.segments[0], DbKeySeg::StringSeg(root) if root == STORAGE_ROOT) diff --git 
a/core/src/types/address.rs b/core/src/types/address.rs index 416b3f059e..7fe431cb86 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -82,8 +82,6 @@ mod internal { "ano::ETH Bridge Address "; pub const ETH_BRIDGE_POOL: &str = "ano::ETH Bridge Pool Address "; - pub const REPLAY_PROTECTION: &str = - "ano::Replay Protection "; pub const MULTITOKEN: &str = "ano::Multitoken "; pub const PGF: &str = @@ -243,9 +241,6 @@ impl Address { eth_addr.to_canonical().replace("0x", ""); format!("{PREFIX_NUT}::{eth_addr}") } - InternalAddress::ReplayProtection => { - internal::REPLAY_PROTECTION.to_string() - } InternalAddress::Multitoken => { internal::MULTITOKEN.to_string() } @@ -329,9 +324,6 @@ impl Address { internal::ETH_BRIDGE_POOL => { Ok(Address::Internal(InternalAddress::EthBridgePool)) } - internal::REPLAY_PROTECTION => { - Ok(Address::Internal(InternalAddress::ReplayProtection)) - } internal::MULTITOKEN => { Ok(Address::Internal(InternalAddress::Multitoken)) } @@ -572,8 +564,6 @@ pub enum InternalAddress { Erc20(EthAddress), /// Non-usable ERC20 tokens Nut(EthAddress), - /// Replay protection contains transactions' hash - ReplayProtection, /// Multitoken Multitoken, /// Pgf @@ -596,7 +586,6 @@ impl Display for InternalAddress { Self::EthBridgePool => "EthBridgePool".to_string(), Self::Erc20(eth_addr) => format!("Erc20: {}", eth_addr), Self::Nut(eth_addr) => format!("Non-usable token: {eth_addr}"), - Self::ReplayProtection => "ReplayProtection".to_string(), Self::Multitoken => "Multitoken".to_string(), Self::Pgf => "PublicGoodFundings".to_string(), } @@ -892,7 +881,6 @@ pub mod testing { InternalAddress::EthBridgePool => {} InternalAddress::Erc20(_) => {} InternalAddress::Nut(_) => {} - InternalAddress::ReplayProtection => {} InternalAddress::Pgf => {} InternalAddress::Multitoken => {} /* Add new addresses in the * `prop_oneof` below. 
*/ @@ -908,7 +896,6 @@ pub mod testing { Just(InternalAddress::EthBridgePool), Just(arb_erc20()), Just(arb_nut()), - Just(InternalAddress::ReplayProtection), Just(InternalAddress::Multitoken), Just(InternalAddress::Pgf), ] diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 31148a1568..1635b4559b 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -5,7 +5,6 @@ pub mod ethereum_bridge; pub mod ibc; pub mod multitoken; pub mod parameters; -pub mod replay_protection; use std::cell::RefCell; use std::collections::BTreeSet; diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs deleted file mode 100644 index a2a2a66f36..0000000000 --- a/shared/src/ledger/native_vp/replay_protection.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Native VP for replay protection - -use std::collections::BTreeSet; - -use namada_core::ledger::storage; -use namada_core::types::address::Address; -use namada_core::types::storage::Key; -use thiserror::Error; - -use crate::ledger::native_vp::{self, Ctx, NativeVp}; -use crate::proto::Tx; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), -} - -/// ReplayProtection functions result -pub type Result = std::result::Result; - -/// Replay Protection VP -pub struct ReplayProtectionVp<'a, DB, H, CA> -where - DB: storage::DB + for<'iter> storage::DBIter<'iter>, - H: storage::StorageHasher, - CA: WasmCacheAccess, -{ - /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, -} - -impl<'a, DB, H, CA> NativeVp for ReplayProtectionVp<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + storage::StorageHasher, - CA: 'static + WasmCacheAccess, -{ - type Error = Error; - - fn validate_tx( - &self, - _tx_data: &Tx, - _keys_changed: &BTreeSet, - _verifiers: &BTreeSet
, - ) -> Result { - // VP should prevent any modification of the subspace. - // Changes are only allowed from protocol - Ok(false) - } -} diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index c2b354e944..aa8b056edf 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -24,7 +24,6 @@ use crate::ledger::native_vp::ethereum_bridge::vp::EthBridge; use crate::ledger::native_vp::ibc::Ibc; use crate::ledger::native_vp::multitoken::MultitokenVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; -use crate::ledger::native_vp::replay_protection::ReplayProtectionVp; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pgf::PgfVp; use crate::ledger::pos::{self, PosVP}; @@ -83,10 +82,6 @@ pub enum Error { EthBridgeNativeVpError(native_vp::ethereum_bridge::vp::Error), #[error("Ethereum bridge pool native VP error: {0}")] BridgePoolNativeVpError(native_vp::ethereum_bridge::bridge_pool_vp::Error), - #[error("Replay protection native VP error: {0}")] - ReplayProtectionNativeVpError( - crate::ledger::native_vp::replay_protection::Error, - ), #[error("Non usable tokens native VP error: {0}")] NutNativeVpError(native_vp::ethereum_bridge::nut::Error), #[error("Access to an internal address {0} is forbidden")] @@ -933,16 +928,6 @@ where gas_meter = bridge_pool.ctx.gas_meter.into_inner(); result } - InternalAddress::ReplayProtection => { - let replay_protection_vp = - ReplayProtectionVp { ctx }; - let result = replay_protection_vp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::ReplayProtectionNativeVpError); - gas_meter = - replay_protection_vp.ctx.gas_meter.into_inner(); - result - } InternalAddress::Pgf => { let pgf_vp = PgfVp { ctx }; let result = pgf_vp From 60f0014da32d030a9a0ceaf004ddf515f5cf22d8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 6 Oct 2023 19:28:46 +0200 Subject: [PATCH 112/161] Updates `DB` and `DBIter` traits for replay protection --- 
apps/src/lib/node/ledger/storage/rocksdb.rs | 25 +++++++-------- core/src/ledger/storage/mockdb.rs | 35 +++++++++++---------- core/src/ledger/storage/mod.rs | 28 +++++++++++++---- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index b0e1dda7ea..ee9a1c658b 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -1409,15 +1409,11 @@ impl DB for RocksDB { fn write_replay_protection_entry( &mut self, batch: &mut Self::WriteBatch, - hash: &namada::types::hash::Hash, + key: &Key, ) -> Result<()> { let replay_protection_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; - let key = Key::parse("last") - .map_err(Error::KeyError)? - .push(&hash.to_string()) - .map_err(Error::KeyError)?; batch .0 .put_cf(replay_protection_cf, key.to_string(), vec![]); @@ -1428,19 +1424,12 @@ impl DB for RocksDB { fn delete_replay_protection_entry( &mut self, batch: &mut Self::WriteBatch, - hash: &namada::types::hash::Hash, + key: &Key, ) -> Result<()> { let replay_protection_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; - for prefix in ["last", "all"] { - let key = Key::parse(prefix) - .map_err(Error::KeyError)? 
- .push(&hash.to_string()) - .map_err(Error::KeyError)?; - - batch.0.delete_cf(replay_protection_cf, key.to_string()) - } + batch.0.delete_cf(replay_protection_cf, key.to_string()); Ok(()) } @@ -1485,6 +1474,14 @@ impl<'iter> DBIter<'iter> for RocksDB { ) -> PersistentPrefixIterator<'iter> { iter_diffs_prefix(self, height, false) } + + fn iter_replay_protection(&'iter self) -> Self::PrefixIter { + let replay_protection_cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("{REPLAY_PROTECTION_CF} column family should exist"); + + iter_prefix(self, replay_protection_cf, "last".to_string(), None) + } } fn iter_subspace_prefix<'iter>( diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index 52ddc51f14..dba7257236 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -414,7 +414,6 @@ impl DB for MockDB { Ok(Some((height, merkle_tree_stores))) } - //FIXME: I should dump the content of the last subey into all at the beginning of finalize block before writing any new hash into last. Do I need another function in this trait for that? fn has_replay_protection_entry(&self, hash: &Hash) -> Result { let prefix_key = Key::parse("replay_protection").map_err(Error::KeyError)?; @@ -564,14 +563,11 @@ impl DB for MockDB { fn write_replay_protection_entry( &mut self, _batch: &mut Self::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()> { let key = Key::parse("replay_protection") .map_err(Error::KeyError)? - .push(&"last".to_string()) - .map_err(Error::KeyError)? 
- .push(&hash.to_string()) - .map_err(Error::KeyError)?; + .join(key); match self.0.borrow_mut().insert(key.to_string(), vec![]) { Some(_) => Err(Error::DBError(format!( @@ -584,18 +580,13 @@ impl DB for MockDB { fn delete_replay_protection_entry( &mut self, _batch: &mut Self::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()> { - let key = Key::parse("replay_protection").map_err(Error::KeyError)?; - for prefix in ["last", "all"] { - let key = key - .push(&prefix.to_string()) - .map_err(Error::KeyError)? - .push(&hash.to_string()) - .map_err(Error::KeyError)?; + let key = Key::parse("replay_protection") + .map_err(Error::KeyError)? + .join(key); - self.0.borrow_mut().remove(&key.to_string()); - } + self.0.borrow_mut().remove(&key.to_string()); Ok(()) } @@ -640,6 +631,18 @@ impl<'iter> DBIter<'iter> for MockDB { // Mock DB can read only the latest value for now unimplemented!() } + + fn iter_replay_protection(&'iter self) -> Self::PrefixIter { + let db_prefix = "replay_protection/".to_owned(); + let iter = self.0.borrow().clone().into_iter(); + MockPrefixIterator::new( + MockIterator { + prefix: "last".to_string(), + iter, + }, + db_prefix, + ) + } } /// A prefix iterator base for the [`MockPrefixIterator`]. 
diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 2adc4caaab..f6c607cb35 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -360,14 +360,14 @@ pub trait DB: std::fmt::Debug { fn write_replay_protection_entry( &mut self, batch: &mut Self::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()>; /// Delete a replay protection entry fn delete_replay_protection_entry( &mut self, batch: &mut Self::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()>; } @@ -392,6 +392,9 @@ pub trait DBIter<'iter> { /// Read subspace new diffs at a given height fn iter_new_diffs(&'iter self, height: BlockHeight) -> Self::PrefixIter; + + /// Read replay protection storage from the last block + fn iter_replay_protection(&'iter self) -> Self::PrefixIter; } /// Atomic batch write. @@ -1122,18 +1125,31 @@ where pub fn write_replay_protection_entry( &mut self, batch: &mut D::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()> { - self.db.write_replay_protection_entry(batch, hash) + self.db.write_replay_protection_entry(batch, key) } /// Delete the provided tx hash from storage pub fn delete_replay_protection_entry( &mut self, batch: &mut D::WriteBatch, - hash: &Hash, + key: &Key, ) -> Result<()> { - self.db.delete_replay_protection_entry(batch, hash) + self.db.delete_replay_protection_entry(batch, key) + } + + /// Iterate the replay protection storage from the last block + pub fn iter_replay_protection( + &self, + ) -> Box + '_> { + Box::new(self.db.iter_replay_protection().map(|(key, _, _)| { + key.rsplit_once('/') + .expect("Missing tx hash in storage key") + .1 + .parse() + .expect("Failed hash conversion") + })) } } From 77dfccb969a5bd628b35801c545fbf89d548606b Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 6 Oct 2023 19:29:19 +0200 Subject: [PATCH 113/161] Refactors replay protection helper functions --- core/src/ledger/replay_protection.rs | 34 ++++++++++++++++++---------- 1 file changed, 22 insertions(+), 
12 deletions(-) diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index 11f51ddda6..71332d295c 100644 --- a/core/src/ledger/replay_protection.rs +++ b/core/src/ledger/replay_protection.rs @@ -1,22 +1,32 @@ //! Replay protection storage use crate::types::hash::Hash; -use crate::types::storage::{DbKeySeg, Key, KeySeg}; +use crate::types::storage::Key; -/// Replay protection storage root -const STORAGE_ROOT: &str = "replay_protection"; +const ERROR_MSG: &str = "Cannot obtain a valid db key"; -/// Check if a key is a replay protection key -pub fn is_replay_protection_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::StringSeg(root) if root == STORAGE_ROOT) +/// Get the transaction hash key under the `last` subkey +pub fn get_replay_protection_last_subkey(hash: &Hash) -> Key { + Key::parse("last") + .expect(ERROR_MSG) + .push(&hash.to_string()) + .expect(ERROR_MSG) +} + +/// Get the transaction hash key under the `all` subkey +pub fn get_replay_protection_all_subkey(hash: &Hash) -> Key { + Key::parse("all") + .expect(ERROR_MSG) + .push(&hash.to_string()) + .expect(ERROR_MSG) } -/// Get the transaction hash key under the last subkey -pub fn get_replay_protection_key(hash: &Hash) -> Key { - Key::parse(STORAGE_ROOT) - .expect("Cannot obtain a valid db key") +/// Get the full transaction hash key under the `last` subkey +pub fn get_replay_protection_last_key(hash: &Hash) -> Key { + Key::parse("replay_protection") + .expect(ERROR_MSG) .push(&"last".to_string()) - .expect("Cannot obtain a valid db key") + .expect(ERROR_MSG) .push(&hash.to_string()) - .expect("Cannot obtain a valid db key") + .expect(ERROR_MSG) } From 2eddc70650965064eda2401606a414d6828ce95e Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 6 Oct 2023 19:31:27 +0200 Subject: [PATCH 114/161] New field in `WriteLog` for replay protection changes --- .../lib/node/ledger/shell/finalize_block.rs | 53 +++--- apps/src/lib/node/ledger/shell/mod.rs | 29 ++- 
.../lib/node/ledger/shell/prepare_proposal.rs | 7 +- .../lib/node/ledger/shell/process_proposal.rs | 8 +- core/src/ledger/storage/wl_storage.rs | 8 +- core/src/ledger/storage/write_log.rs | 167 +++++++++++++----- shared/src/ledger/protocol/mod.rs | 24 +-- 7 files changed, 187 insertions(+), 109 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..b3b106f0ac 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -11,7 +11,7 @@ use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; use namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ledger::storage_api::token::credit_tokens; use namada::ledger::storage_api::{pgf, StorageRead, StorageWrite}; -use namada::ledger::{inflation, protocol, replay_protection}; +use namada::ledger::{inflation, protocol}; use namada::proof_of_stake::{ delegator_rewards_products_handle, find_validator_by_raw_hash, read_last_block_proposer_address, read_pos_params, read_total_stake, @@ -216,10 +216,9 @@ where .clone() .update_header(TxType::Raw) .header_hash(); - let tx_hash_key = - replay_protection::get_replay_protection_key(&tx_hash); self.wl_storage - .delete(&tx_hash_key) + .write_log + .delete_tx_hash(tx_hash) .expect("Error while deleting tx hash from storage"); } @@ -515,13 +514,13 @@ where if let Error::TxApply(protocol::Error::GasError(_)) = msg { - let tx_hash_key = - replay_protection::get_replay_protection_key( - &hash, + self.wl_storage + .write_log + .delete_tx_hash(hash) + .expect( + "Error while deleting tx hash key from \ + storage", ); - self.wl_storage.delete(&tx_hash_key).expect( - "Error while deleting tx hash key from storage", - ); } } @@ -539,6 +538,14 @@ where response.events.push(tx_event); } + // Finalize the transactions' hashes from the previous block + for hash in self.wl_storage.storage.iter_replay_protection() { + 
self.wl_storage + .write_log + .finalize_tx_hashes(hash) + .expect("Failed tx hashes finalization") + } + stats.set_tx_cache_size( self.tx_wasm_cache.get_size(), self.tx_wasm_cache.get_cache_size(), @@ -1045,6 +1052,7 @@ mod test_finalize_block { use namada::core::ledger::governance::storage::vote::{ StorageProposalVote, VoteType, }; + use namada::core::ledger::replay_protection; use namada::eth_bridge::storage::bridge_pool::{ self, get_key_from_hash, get_nonce_key, get_signed_root_key, }; @@ -2269,15 +2277,17 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper_tx.header_hash(), - ); + let wrapper_hash_key = + replay_protection::get_replay_protection_last_key( + &wrapper_tx.header_hash(), + ); let mut decrypted_tx = wrapper_tx; decrypted_tx.update_header(TxType::Raw); - let decrypted_hash_key = replay_protection::get_replay_protection_key( - &decrypted_tx.header_hash(), - ); + let decrypted_hash_key = + replay_protection::get_replay_protection_last_key( + &decrypted_tx.header_hash(), + ); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); @@ -2361,7 +2371,7 @@ mod test_finalize_block { decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // Write inner hash in storage - let inner_hash_key = replay_protection::get_replay_protection_key( + let inner_hash_key = replay_protection::get_replay_protection_last_key( &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), ); shell @@ -2435,10 +2445,11 @@ mod test_finalize_block { None, ))); - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper.header_hash(), - ); - let inner_hash_key = replay_protection::get_replay_protection_key( + let wrapper_hash_key = + replay_protection::get_replay_protection_last_key( + &wrapper.header_hash(), + ); + let inner_hash_key = 
replay_protection::get_replay_protection_last_key( &wrapper.clone().update_header(TxType::Raw).header_hash(), ); diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 6b59e96cec..daf79e2788 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -48,7 +48,7 @@ use namada::ledger::storage::{ EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::ledger::storage_api::{self, StorageRead}; -use namada::ledger::{parameters, pos, protocol, replay_protection}; +use namada::ledger::{parameters, pos, protocol}; use namada::proof_of_stake::{self, process_slashes, read_pos_params, slash}; use namada::proto::{self, Section, Tx}; use namada::types::address::Address; @@ -63,7 +63,7 @@ use namada::types::transaction::{ hash_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, WrapperTx, }; -use namada::types::{address, hash, token}; +use namada::types::{address, token}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; use num_derive::{FromPrimitive, ToPrimitive}; @@ -945,11 +945,9 @@ where } // Write inner hash to tx WAL - let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); temp_wl_storage .write_log - .write(&inner_hash_key, vec![]) + .write_tx_hash(inner_tx_hash) .expect("Couldn't write inner transaction hash to write log"); let tx = @@ -966,11 +964,9 @@ where } // Write wrapper hash to tx WAL - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); temp_wl_storage .write_log - .write(&wrapper_hash_key, vec![]) + .write_tx_hash(wrapper_hash) .expect("Couldn't write wrapper tx hash to write log"); Ok(()) @@ -1266,14 +1262,11 @@ where let mut inner_tx = tx; inner_tx.update_header(TxType::Raw); let inner_tx_hash = &inner_tx.header_hash(); - let inner_hash_key = - replay_protection::get_replay_protection_key(inner_tx_hash); if self .wl_storage .storage - 
.has_key(&inner_hash_key) + .has_replay_protection_entry(inner_tx_hash) .expect("Error while checking inner tx hash key in storage") - .0 { response.code = ErrorCodes::ReplayTx.into(); response.log = format!( @@ -1286,17 +1279,14 @@ where let tx = Tx::try_from(tx_bytes) .expect("Deserialization shouldn't fail"); - let wrapper_hash = hash::Hash(tx.header_hash().0); - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + let wrapper_hash = &tx.header_hash(); if self .wl_storage .storage - .has_key(&wrapper_hash_key) + .has_replay_protection_entry(wrapper_hash) .expect( "Error while checking wrapper tx hash key in storage", ) - .0 { response.code = ErrorCodes::ReplayTx.into(); response.log = format!( @@ -2323,6 +2313,7 @@ mod abciplus_mempool_tests { #[cfg(test)] mod tests { + use namada::ledger::replay_protection; use namada::proof_of_stake::Epoch; use namada::proto::{Code, Data, Section, Signature, Tx}; use namada::types::transaction::{Fee, WrapperTx}; @@ -2466,7 +2457,7 @@ mod tests { // Write wrapper hash to storage let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + replay_protection::get_replay_protection_last_key(&wrapper_hash); shell .wl_storage .storage @@ -2506,7 +2497,7 @@ mod tests { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash in storage let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); + replay_protection::get_replay_protection_last_key(&inner_tx_hash); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..103c90b982 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1188,7 +1188,7 @@ mod test_prepare_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - 
let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_key( &wrapper_unsigned_hash, ); shell @@ -1283,8 +1283,9 @@ mod test_prepare_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let hash_key = replay_protection::get_replay_protection_last_key( + &inner_unsigned_hash, + ); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..a56136047d 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -988,6 +988,7 @@ mod test_process_proposal { #[cfg(feature = "abcipp")] use assert_matches::assert_matches; + use namada::ledger::replay_protection; use namada::ledger::storage_api::StorageWrite; use namada::proto::{ Code, Data, Section, SignableEthMessage, Signature, Signed, @@ -2132,7 +2133,7 @@ mod test_process_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_key( &wrapper_unsigned_hash, ); shell @@ -2267,8 +2268,9 @@ mod test_process_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let hash_key = replay_protection::get_replay_protection_last_key( + &inner_unsigned_hash, + ); shell .wl_storage .storage diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 71914d519c..f8111fa18a 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -57,16 +57,12 @@ where } } - /// Check if the given tx hash is present + 
/// Check if the given tx hash has already been processed pub fn has_replay_protection_entry( &self, hash: &Hash, ) -> Result { - let key = - crate::ledger::replay_protection::get_replay_protection_key(hash); - if let Some(write_log::StorageModification::Write { .. }) = - self.write_log.read(&key).0 - { + if self.write_log.has_replay_protection_key(hash) { return Ok(true); } diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index e209402c64..a62af90fe6 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -10,14 +10,15 @@ use crate::ledger; use crate::ledger::gas::{ STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, }; -use crate::ledger::replay_protection::is_replay_protection_key; +use crate::ledger::replay_protection::{ + get_replay_protection_all_subkey, get_replay_protection_last_subkey, +}; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::Storage; use crate::types::address::{Address, EstablishedAddressGen, InternalAddress}; use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; use crate::types::storage; -use crate::types::storage::KeySeg; use crate::types::token::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, }; @@ -38,6 +39,8 @@ pub enum Error { DeleteVp, #[error("Trying to write a temporary value after deleting")] WriteTempAfterDelete, + #[error("Replay protection key: {0}")] + ReplayProtection(String), } /// Result for functions that may fail @@ -68,6 +71,17 @@ pub enum StorageModification { }, } +#[derive(Debug, Clone)] +/// A replay protection storage modification +enum ReProtStorageModification { + /// Write an entry + Write, + /// Delete an entry + Delete, + /// Finalize an entry + Finalize, +} + /// The write log storage #[derive(Debug, Clone)] pub struct WriteLog { @@ -89,6 +103,9 @@ pub struct WriteLog { tx_precommit_write_log: HashMap, /// The IBC events for the current transaction ibc_events: 
BTreeSet, + /// Storage modifications for the replay protection storage, always + /// committed regardless of the result of the transaction + replay_protection: HashMap, } /// Write log prefix iterator @@ -115,6 +132,7 @@ impl Default for WriteLog { tx_write_log: HashMap::with_capacity(100), tx_precommit_write_log: HashMap::with_capacity(100), ibc_events: BTreeSet::new(), + replay_protection: HashMap::with_capacity(1_000), } } } @@ -492,54 +510,17 @@ impl WriteLog { + for<'iter> ledger::storage::DBIter<'iter>, H: StorageHasher, { - // FIXME: maybe better to use two new fields in the write log for replay - // protection keys? - let _iter = self.block_write_log.iter(); for (key, entry) in self.block_write_log.iter() { match entry { StorageModification::Write { value } => { - if is_replay_protection_key(key) { - let hash = key - .last() - .ok_or(Error::StorageError( - ledger::storage::Error::KeyError( - crate::types::storage::Error::EmptyKey, - ), - ))? - .raw() - .parse() - .map_err(|_e| Error::StorageError(ledger::storage::Error::KeyError(crate::types::storage::Error::InvalidKeySeg("Expected valid hash".to_string()))))?; - - storage - .write_replay_protection_entry(batch, &hash) - .map_err(Error::StorageError)?; - } else { - storage - .batch_write_subspace_val(batch, key, value.clone()) - .map_err(Error::StorageError)?; - } + storage + .batch_write_subspace_val(batch, key, value.clone()) + .map_err(Error::StorageError)?; } StorageModification::Delete => { - if is_replay_protection_key(key) { - let hash = key - .last() - .ok_or(Error::StorageError( - ledger::storage::Error::KeyError( - crate::types::storage::Error::EmptyKey, - ), - ))? 
- .raw() - .parse() - .map_err(|_e| Error::StorageError(ledger::storage::Error::KeyError(crate::types::storage::Error::InvalidKeySeg("Expected valid hash".to_string()))))?; - - storage - .delete_replay_protection_entry(batch, &hash) - .map_err(Error::StorageError)?; - } else { - storage - .batch_delete_subspace_val(batch, key) - .map_err(Error::StorageError)?; - } + storage + .batch_delete_subspace_val(batch, key) + .map_err(Error::StorageError)?; } StorageModification::InitAccount { vp_code_hash } => { storage @@ -551,6 +532,41 @@ impl WriteLog { } } + for (hash, entry) in self.replay_protection.iter() { + match entry { + ReProtStorageModification::Write => storage + .write_replay_protection_entry( + batch, + // Can only write tx hashes to the previous block, no + // further + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)?, + ReProtStorageModification::Delete => storage + .delete_replay_protection_entry( + batch, + // Can only delete tx hashes from the previous block, + // no further + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)?, + ReProtStorageModification::Finalize => { + storage + .write_replay_protection_entry( + batch, + &get_replay_protection_all_subkey(hash), + ) + .map_err(Error::StorageError)?; + storage + .delete_replay_protection_entry( + batch, + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)? 
+ } + } + } + if let Some(address_gen) = self.address_gen.take() { storage.address_gen = address_gen } @@ -648,6 +664,67 @@ impl WriteLog { let iter = matches.into_iter(); PrefixIter { iter } } + + /// Check if the given tx hash has already been processed + pub fn has_replay_protection_key(&self, hash: &Hash) -> bool { + match self.replay_protection.get(hash) { + Some(v) => !matches!(v, ReProtStorageModification::Delete), + None => false, + } + } + + /// Write the transaction hash + pub fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { + match self + .replay_protection + .insert(hash, ReProtStorageModification::Write) + { + // Cannot write an hash if other requests have already been + // committed for the same hash + Some(_) => Err(Error::ReplayProtection( + "Requested a write over a previous request".to_string(), + )), + None => Ok(()), + } + } + + /// Remove the transaction hash + pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { + if let Some(ReProtStorageModification::Write) = self + .replay_protection + .insert(hash, ReProtStorageModification::Delete) + { + // Cannot delete an hash that still has to be written to + // storage, instead allow overwriting other deletes or finalize + // requests + return Err(Error::ReplayProtection( + "Requested a delete of an hash not yet committed to storage" + .to_string(), + )); + } + + Ok(()) + } + + /// Move the transaction hash of the previous block to the list of all + /// blocks. 
This functions should be called at the end of the block + /// processing + pub fn finalize_tx_hashes(&mut self, hash: Hash) -> Result<()> { + if let Some(ReProtStorageModification::Write) = self + .replay_protection + .insert(hash, ReProtStorageModification::Finalize) + { + // Cannot finalize a tx hash that has to be written in this + // block, else avoid finalizing if a delete request or another + // finalize request has been committed + return Err(Error::ReplayProtection( + "Requested a finalize on a hash not yet committed to storage" + .to_string(), + )); + } + + Ok(()) + } } #[cfg(test)] diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index aa8b056edf..ddeecde52e 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -7,7 +7,7 @@ use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::TxGasMeter; use namada_core::ledger::storage::wl_storage::WriteLogAndStorage; -use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; +use namada_core::ledger::storage_api::StorageRead; use namada_core::proto::Section; use namada_core::types::hash::Hash; use namada_core::types::storage::Key; @@ -228,15 +228,15 @@ where let mut changed_keys = BTreeSet::default(); let mut tx: Tx = tx_bytes.try_into().unwrap(); - // Writes wrapper tx hash to block write log (changes must be persisted even - // in case of failure) - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&tx.header_hash()); + // Writes wrapper tx hash shell_params .wl_storage - .write(&wrapper_hash_key, ()) + .write_log_mut() + .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); - changed_keys.insert(wrapper_hash_key); + changed_keys.insert(replay_protection::get_replay_protection_last_key( + &tx.header_hash(), + )); // Charge fee before performing any fallible operations charge_fee( @@ -251,14 +251,14 @@ where 
shell_params.tx_gas_meter.add_tx_size_gas(tx_bytes)?; // If wrapper was succesful, write inner tx hash to storage - let inner_hash_key = replay_protection::get_replay_protection_key( - &tx.update_header(TxType::Raw).header_hash(), - ); shell_params .wl_storage - .write(&inner_hash_key, ()) + .write_log_mut() + .write_tx_hash(tx.update_header(TxType::Raw).header_hash()) .expect("Error while writing tx hash to storage"); - changed_keys.insert(inner_hash_key); + changed_keys.insert(replay_protection::get_replay_protection_last_key( + &tx.update_header(TxType::Raw).header_hash(), + )); Ok(changed_keys) } From daeae3e8ce0154ed676e7a91e8aae6809a3de873 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 9 Oct 2023 16:03:55 +0200 Subject: [PATCH 115/161] Improves replay protection `WriteLog` API --- core/src/ledger/storage/wl_storage.rs | 2 +- core/src/ledger/storage/write_log.rs | 55 +++++++++++++++------------ 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index f8111fa18a..c987af1695 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -62,7 +62,7 @@ where &self, hash: &Hash, ) -> Result { - if self.write_log.has_replay_protection_key(hash) { + if self.write_log.has_replay_protection_entry(hash) { return Ok(true); } diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index a62af90fe6..ce66d7f5fa 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -571,6 +571,7 @@ impl WriteLog { storage.address_gen = address_gen } self.block_write_log.clear(); + self.replay_protection.clear(); Ok(()) } @@ -666,7 +667,7 @@ impl WriteLog { } /// Check if the given tx hash has already been processed - pub fn has_replay_protection_key(&self, hash: &Hash) -> bool { + pub fn has_replay_protection_entry(&self, hash: &Hash) -> bool { match self.replay_protection.get(hash) 
{ Some(v) => !matches!(v, ReProtStorageModification::Delete), None => false, @@ -675,52 +676,58 @@ impl WriteLog { /// Write the transaction hash pub fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { - match self + if self .replay_protection .insert(hash, ReProtStorageModification::Write) + .is_some() { // Cannot write an hash if other requests have already been // committed for the same hash - Some(_) => Err(Error::ReplayProtection( - "Requested a write over a previous request".to_string(), - )), - None => Ok(()), + return Err(Error::ReplayProtection(format!( + "Requested a write on hash {hash} over a previous request" + ))); } + + Ok(()) } + // FIXME: add a unit test to check the values inside the write log and + // storage for replay protection after a block /// Remove the transaction hash pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { - if let Some(ReProtStorageModification::Write) = self + match self .replay_protection .insert(hash, ReProtStorageModification::Delete) { + None => Ok(()), + // Allow overwriting a previous finalize request + Some(ReProtStorageModification::Finalize) => Ok(()), + Some(_) => // Cannot delete an hash that still has to be written to - // storage, instead allow overwriting other deletes or finalize - // requests - return Err(Error::ReplayProtection( - "Requested a delete of an hash not yet committed to storage" - .to_string(), - )); + // storage or has already been deleted + { + Err(Error::ReplayProtection(format!( + "Requested a delete on hash {hash} not yet committed to \ + storage" + ))) + } } - - Ok(()) } /// Move the transaction hash of the previous block to the list of all - /// blocks. This functions should be called at the end of the block + /// blocks. 
This functions should be called at the beginning of the block /// processing pub fn finalize_tx_hashes(&mut self, hash: Hash) -> Result<()> { - if let Some(ReProtStorageModification::Write) = self + if self .replay_protection .insert(hash, ReProtStorageModification::Finalize) + .is_some() { - // Cannot finalize a tx hash that has to be written in this - // block, else avoid finalizing if a delete request or another - // finalize request has been committed - return Err(Error::ReplayProtection( - "Requested a finalize on a hash not yet committed to storage" - .to_string(), - )); + // Cannot finalize an hash if other requests have already been + // committed for the same hash + return Err(Error::ReplayProtection(format!( + "Requested a finalize on hash {hash} over a previous request" + ))); } Ok(()) From 2661d86aa667525019ea211934742cfa36a6a305 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 9 Oct 2023 16:05:03 +0200 Subject: [PATCH 116/161] Fixes unit tests --- .../lib/node/ledger/shell/finalize_block.rs | 65 +++++++------ apps/src/lib/node/ledger/shell/mod.rs | 12 ++- .../lib/node/ledger/shell/process_proposal.rs | 12 ++- core/src/ledger/storage/write_log.rs | 94 ++++++++++++++++++- 4 files changed, 145 insertions(+), 38 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index b3b106f0ac..5c6afae151 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -36,6 +36,8 @@ use crate::facade::tendermint_proto::abci::{ }; use crate::node::ledger::shell::stats::InternalStats; +// FIXME: optimization with replay proteciton key removal + impl Shell where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -87,6 +89,14 @@ where self.wl_storage.storage.update_epoch_blocks_delay ); + // Finalize the transactions' hashes from the previous block + for hash in self.wl_storage.storage.iter_replay_protection() { + self.wl_storage + 
.write_log + .finalize_tx_hashes(hash) + .expect("Failed tx hashes finalization") + } + if new_epoch { namada::ledger::storage::update_allowed_conversions( &mut self.wl_storage, @@ -538,14 +548,6 @@ where response.events.push(tx_event); } - // Finalize the transactions' hashes from the previous block - for hash in self.wl_storage.storage.iter_replay_protection() { - self.wl_storage - .write_log - .finalize_tx_hashes(hash) - .expect("Failed tx hashes finalization") - } - stats.set_tx_cache_size( self.tx_wasm_cache.get_size(), self.tx_wasm_cache.get_cache_size(), @@ -2281,7 +2283,7 @@ mod test_finalize_block { replay_protection::get_replay_protection_last_key( &wrapper_tx.header_hash(), ); - let mut decrypted_tx = wrapper_tx; + let mut decrypted_tx = wrapper_tx.clone(); decrypted_tx.update_header(TxType::Raw); let decrypted_hash_key = @@ -2314,8 +2316,20 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transactions' hashes in storage - assert!(shell.shell.wl_storage.has_key(&wrapper_hash_key).unwrap()); - assert!(shell.shell.wl_storage.has_key(&decrypted_hash_key).unwrap()); + assert!( + shell + .shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_tx.header_hash()) + ); + assert!( + shell + .shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_tx.header_hash()) + ); // Check that non of the hashes is present in the merkle tree assert!( !shell @@ -2345,6 +2359,8 @@ mod test_finalize_block { fn test_remove_tx_hash() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); let mut wasm_path = top_level_directory(); wasm_path.push("wasm_for_tests/tx_no_op.wasm"); @@ -2371,13 +2387,17 @@ mod test_finalize_block { decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // Write inner hash in storage + let inner_hash_subkey = + replay_protection::get_replay_protection_last_subkey( + 
&wrapper_tx.clone().update_header(TxType::Raw).header_hash(), + ); let inner_hash_key = replay_protection::get_replay_protection_last_key( &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), ); shell .wl_storage .storage - .write(&inner_hash_key, vec![]) + .write_replay_protection_entry(&mut batch, &inner_hash_subkey) .expect("Test failed"); let processed_tx = ProcessedTx { @@ -2445,14 +2465,6 @@ mod test_finalize_block { None, ))); - let wrapper_hash_key = - replay_protection::get_replay_protection_last_key( - &wrapper.header_hash(), - ); - let inner_hash_key = replay_protection::get_replay_protection_last_key( - &wrapper.clone().update_header(TxType::Raw).header_hash(), - ); - let processed_tx = ProcessedTx { tx: wrapper.to_bytes(), result: TxResult { @@ -2477,15 +2489,12 @@ mod test_finalize_block { assert!( shell .wl_storage - .has_key(&wrapper_hash_key) - .expect("Test failed") + .write_log + .has_replay_protection_entry(&wrapper.header_hash()) ); - assert!( - !shell - .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) + assert!(!shell.wl_storage.write_log.has_replay_protection_entry( + &wrapper.update_header(TxType::Raw).header_hash() + )) } // Test that if the fee payer doesn't have enough funds for fee payment the diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index daf79e2788..81aa225655 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -2455,13 +2455,15 @@ mod tests { ))); // Write wrapper hash to storage + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = - replay_protection::get_replay_protection_last_key(&wrapper_hash); + replay_protection::get_replay_protection_last_subkey(&wrapper_hash); shell .wl_storage .storage - .write(&wrapper_hash_key, wrapper_hash) + .write_replay_protection_entry(&mut batch, &wrapper_hash_key) .expect("Test 
failed"); // Try wrapper tx replay attack @@ -2497,11 +2499,13 @@ mod tests { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash in storage let inner_hash_key = - replay_protection::get_replay_protection_last_key(&inner_tx_hash); + replay_protection::get_replay_protection_last_subkey( + &inner_tx_hash, + ); shell .wl_storage .storage - .write(&inner_hash_key, inner_tx_hash) + .write_replay_protection_entry(&mut batch, &inner_hash_key) .expect("Test failed"); // Try inner tx replay attack diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index a56136047d..50950b60f0 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2132,14 +2132,16 @@ mod test_process_proposal { ))); // Write wrapper hash to storage + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_last_key( + let hash_key = replay_protection::get_replay_protection_last_subkey( &wrapper_unsigned_hash, ); shell .wl_storage .storage - .write(&hash_key, vec![]) + .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); // Run validation @@ -2268,13 +2270,15 @@ mod test_process_proposal { wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = replay_protection::get_replay_protection_last_key( + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); + let hash_key = replay_protection::get_replay_protection_last_subkey( &inner_unsigned_hash, ); shell .wl_storage .storage - .write(&hash_key, vec![]) + .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); // Run validation diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index ce66d7f5fa..45acfd9c55 100644 --- 
a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -691,8 +691,6 @@ impl WriteLog { Ok(()) } - // FIXME: add a unit test to check the values inside the write log and - // storage for replay protection after a block /// Remove the transaction hash pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { match self @@ -958,6 +956,98 @@ mod tests { assert_eq!(value, None); } + #[test] + fn test_replay_protection_commit() { + let mut storage = + crate::ledger::storage::testing::TestStorage::default(); + let mut write_log = WriteLog::default(); + let mut batch = crate::ledger::storage::testing::TestStorage::batch(); + + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx3".as_bytes())) + .unwrap(); + + // commit a block + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + for tx in ["tx1", "tx2", "tx3"] { + assert!( + storage + .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed") + ); + } + + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx4".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx5".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx6".as_bytes())) + .unwrap(); + + // delete previous hash + write_log + .delete_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + + // finalize previous hashes + for tx in ["tx2", "tx3"] { + write_log + .finalize_tx_hashes(Hash::sha256(tx.as_bytes())) + .unwrap(); + } + + // commit a block + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + for tx in ["tx2", "tx3", "tx4", "tx5", "tx6"] { + assert!( + storage + 
.has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed") + ); + } + assert!( + !storage + .has_replay_protection_entry(&Hash::sha256("tx1".as_bytes())) + .expect("read failed") + ); + + // try to delete finalized hash which shouldn't work + write_log + .delete_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + + // commit a block + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + assert!( + storage + .has_replay_protection_entry(&Hash::sha256("tx2".as_bytes())) + .expect("read failed") + ); + } + prop_compose! { fn arb_verifiers_changed_key_tx_all_key() (verifiers_from_tx in testing::arb_verifiers_from_tx()) From b2ec6a365f4da9e0ae1e9142b6a4fedbbf97f98f Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 9 Oct 2023 17:50:42 +0200 Subject: [PATCH 117/161] Renames `finalize_tx_hashes` --- apps/src/lib/node/ledger/shell/finalize_block.rs | 2 +- core/src/ledger/storage/write_log.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 5c6afae151..5772c2c74b 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -93,7 +93,7 @@ where for hash in self.wl_storage.storage.iter_replay_protection() { self.wl_storage .write_log - .finalize_tx_hashes(hash) + .finalize_tx_hash(hash) .expect("Failed tx hashes finalization") } diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 45acfd9c55..e302baa9fe 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -715,7 +715,7 @@ impl WriteLog { /// Move the transaction hash of the previous block to the list of all /// blocks. 
This functions should be called at the beginning of the block /// processing - pub fn finalize_tx_hashes(&mut self, hash: Hash) -> Result<()> { + pub fn finalize_tx_hash(&mut self, hash: Hash) -> Result<()> { if self .replay_protection .insert(hash, ReProtStorageModification::Finalize) @@ -1007,7 +1007,7 @@ mod tests { // finalize previous hashes for tx in ["tx2", "tx3"] { write_log - .finalize_tx_hashes(Hash::sha256(tx.as_bytes())) + .finalize_tx_hash(Hash::sha256(tx.as_bytes())) .unwrap(); } From 4a7ec74e2df760fd807c51eb4c7b7d7f24dfe96d Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 10 Oct 2023 16:00:08 +0200 Subject: [PATCH 118/161] Removes wrapper hash when committed inner tx --- .../lib/node/ledger/shell/finalize_block.rs | 260 +++++++++++------- 1 file changed, 154 insertions(+), 106 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 5772c2c74b..cfeed01311 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -36,8 +36,6 @@ use crate::facade::tendermint_proto::abci::{ }; use crate::node::ledger::shell::stats::InternalStats; -// FIXME: optimization with replay proteciton key removal - impl Shell where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -285,7 +283,7 @@ where continue; } - let (mut tx_event, tx_unsigned_hash, mut tx_gas_meter, wrapper) = + let (mut tx_event, embedding_wrapper, mut tx_gas_meter, wrapper) = match &tx_header.tx_type { TxType::Wrapper(wrapper) => { stats.increment_wrapper_txs(); @@ -295,7 +293,7 @@ where } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - let mut tx_in_queue = self + let tx_in_queue = self .wl_storage .storage .tx_queue @@ -332,12 +330,7 @@ where ( event, - Some( - tx_in_queue - .tx - .update_header(TxType::Raw) - .header_hash(), - ), + Some(tx_in_queue.tx), TxGasMeter::new_from_sub_limit(tx_in_queue.gas), None, ) @@ 
-443,6 +436,16 @@ where .map_err(Error::TxApply) { Ok(result) => { + if let Some(wrapper) = embedding_wrapper { + // If transaction is decrypted remove the corresponding + // wrapper hash from storage which is not needed anymore + self.wl_storage + .write_log + .delete_tx_hash(wrapper.header_hash()) + .expect( + "Error while deleting tx hash key from storage", + ); + } if result.is_accepted() { if let EventType::Accepted = tx_event.event_type { // Wrapper transaction @@ -520,13 +523,26 @@ where // If transaction type is Decrypted and failed because of // out of gas, remove its hash from storage to allow // rewrapping it - if let Some(hash) = tx_unsigned_hash { + if let Some(mut wrapper) = embedding_wrapper { if let Error::TxApply(protocol::Error::GasError(_)) = msg { + let raw_header_hash = wrapper + .update_header(TxType::Raw) + .header_hash(); + self.wl_storage + .write_log + .delete_tx_hash(raw_header_hash) + .expect( + "Error while deleting tx hash key from \ + storage", + ); + } else { + // Remove the wrapper hash which is not needed + // anymore self.wl_storage .write_log - .delete_tx_hash(hash) + .delete_tx_hash(wrapper.header_hash()) .expect( "Error while deleting tx hash key from \ storage", @@ -2353,148 +2369,180 @@ mod test_finalize_block { ); } - /// Test that if a decrypted transaction fails because of out-of-gas, its - /// hash is removed from storage to allow rewrapping it + /// Test replay protection hash handling #[test] - fn test_remove_tx_hash() { + fn test_tx_hash_handling() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); let mut batch = namada::core::ledger::storage::testing::TestStorage::batch(); - let mut wasm_path = top_level_directory(); - wasm_path.push("wasm_for_tests/tx_no_op.wasm"); - let tx_code = std::fs::read(wasm_path) - .expect("Expected a file at given code path"); - let mut wrapper_tx = + let (wrapper_tx, _) = mk_wrapper_tx(&shell, &keypair); + let (wrapper_tx_2, _) = mk_wrapper_tx(&shell, &keypair); + let mut 
invalid_wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: 0.into(), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + 0.into(), None, )))); - wrapper_tx.header.chain_id = shell.chain_id.clone(); - wrapper_tx.set_code(Code::new(tx_code)); - wrapper_tx.set_data(Data::new( + invalid_wrapper_tx.header.chain_id = shell.chain_id.clone(); + invalid_wrapper_tx + .set_code(Code::new("wasm_code".as_bytes().to_owned())); + invalid_wrapper_tx.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); + invalid_wrapper_tx.add_section(Section::Signature(Signature::new( + invalid_wrapper_tx.sechashes(), + [(0, keypair)].into_iter().collect(), + None, + ))); + + let wrapper_hash = wrapper_tx.header_hash(); + let wrapper_2_hash = wrapper_tx_2.header_hash(); + let invalid_wrapper_hash = invalid_wrapper_tx.header_hash(); let mut decrypted_tx = wrapper_tx.clone(); + let mut decrypted_tx_2 = wrapper_tx_2.clone(); decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + decrypted_tx_2.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + let decrypted_hash = + wrapper_tx.clone().update_header(TxType::Raw).header_hash(); + let decrypted_2_hash = wrapper_tx_2 + .clone() + .update_header(TxType::Raw) + .header_hash(); + let decrypted_3_hash = invalid_wrapper_tx + .clone() + .update_header(TxType::Raw) + .header_hash(); + + // Write wrapper and inner hashes in storage + for hash in [ + &decrypted_hash, + &wrapper_hash, + &decrypted_2_hash, + &wrapper_2_hash, + ] { + let hash_subkey = + replay_protection::get_replay_protection_last_subkey(hash); + shell + .wl_storage + .storage + .write_replay_protection_entry(&mut batch, &hash_subkey) + .expect("Test failed"); + } - // Write inner hash in storage - let inner_hash_subkey = - replay_protection::get_replay_protection_last_subkey( - 
&wrapper_tx.clone().update_header(TxType::Raw).header_hash(), - ); - let inner_hash_key = replay_protection::get_replay_protection_last_key( - &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), - ); - shell - .wl_storage - .storage - .write_replay_protection_entry(&mut batch, &inner_hash_subkey) - .expect("Test failed"); - - let processed_tx = ProcessedTx { + // Invalid wrapper tx that should lead to a commitment of the wrapper + // hash and no commitment of the inner hash + let mut processed_txs = vec![ProcessedTx { + tx: invalid_wrapper_tx.to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }]; + // Out of gas error triggering inner hash removal + processed_txs.push(ProcessedTx { tx: decrypted_tx.to_bytes(), result: TxResult { code: ErrorCodes::Ok.into(), info: "".into(), }, - }; + }); + // Wasm error that still leads to inner hash commitment and wrapper hash + // removal + processed_txs.push(ProcessedTx { + tx: decrypted_tx_2.to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }); shell.enqueue_tx(wrapper_tx, Gas::default()); + shell.enqueue_tx(wrapper_tx_2, GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { - txs: vec![processed_tx], + txs: processed_txs, ..Default::default() }) - .expect("Test failed")[0]; + .expect("Test failed"); // the merkle tree root should not change after finalize_block let root_post = shell.shell.wl_storage.storage.block.tree.root(); assert_eq!(root_pre.0, root_post.0); - // Check inner tx hash has been removed from storage - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Testfailed").as_str(); + // Check first inner tx hash has been removed from storage but + // corresponding wrapper hash is still there Check second inner + // tx is still there 
and corresponding wrapper hash has been removed + // since useless + assert_eq!(event[0].event_type.to_string(), String::from("accepted")); + let code = event[0] + .attributes + .get("code") + .expect("Test failed") + .as_str(); + assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); + assert_eq!(event[1].event_type.to_string(), String::from("applied")); + let code = event[1] + .attributes + .get("code") + .expect("Test failed") + .as_str(); + assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(event[2].event_type.to_string(), String::from("applied")); + let code = event[2] + .attributes + .get("code") + .expect("Test failed") + .as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&invalid_wrapper_hash) + ); assert!( !shell .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) - } - - #[test] - /// Test that the hash of the wrapper transaction is committed to storage - /// even if the wrapper tx fails. 
The inner transaction hash must instead be - /// removed - fn test_commits_hash_if_wrapper_failure() { - let (mut shell, _, _, _) = setup(); - let keypair = gen_keypair(); - - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: 0.into(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - 0.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper.set_data(Data::new( - "Encrypted transaction data".as_bytes().to_owned(), - )); - wrapper.add_section(Section::Signature(Signature::new( - wrapper.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - - let processed_tx = ProcessedTx { - tx: wrapper.to_bytes(), - result: TxResult { - code: ErrorCodes::Ok.into(), - info: "".into(), - }, - }; - - let event = &shell - .finalize_block(FinalizeBlock { - txs: vec![processed_tx], - ..Default::default() - }) - .expect("Test failed")[0]; - - // Check wrapper hash has been committed to storage even if it failed. 
- // Check that, instead, the inner hash has been removed - assert_eq!(event.event_type.to_string(), String::from("accepted")); - let code = event.attributes.get("code").expect("Testfailed").as_str(); - assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); - + .write_log + .has_replay_protection_entry(&decrypted_3_hash) + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_hash) + ); assert!( shell + .wl_storage + .storage + .has_replay_protection_entry(&wrapper_hash) + .expect("test failed") + ); + assert!( + shell + .wl_storage + .storage + .has_replay_protection_entry(&decrypted_2_hash) + .expect("test failed") + ); + assert!( + !shell .wl_storage .write_log - .has_replay_protection_entry(&wrapper.header_hash()) + .has_replay_protection_entry(&wrapper_2_hash) ); - assert!(!shell.wl_storage.write_log.has_replay_protection_entry( - &wrapper.update_header(TxType::Raw).header_hash() - )) } // Test that if the fee payer doesn't have enough funds for fee payment the From 1e06990152de9d4cd9392d724601d0c02a11056a Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 10 Oct 2023 18:08:22 +0200 Subject: [PATCH 119/161] Writes only one hash at a time for replay protection --- .../lib/node/ledger/shell/finalize_block.rs | 117 ++++++------------ shared/src/ledger/protocol/mod.rs | 24 ++-- shared/src/ledger/queries/shell.rs | 1 + 3 files changed, 49 insertions(+), 93 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index cfeed01311..71801c817d 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -211,23 +211,17 @@ where tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed and remove the hash - // from storage + // from the queue of txs to be processed, remove its hash + // from 
storage and write the hash of the corresponding wrapper if let TxType::Decrypted(_) = &tx_header.tx_type { - let tx_hash = self + let wrapper_tx = self .wl_storage .storage .tx_queue .pop() .expect("Missing wrapper tx in queue") - .tx - .clone() - .update_header(TxType::Raw) - .header_hash(); - self.wl_storage - .write_log - .delete_tx_hash(tx_hash) - .expect("Error while deleting tx hash from storage"); + .tx; + self.allow_tx_replay(wrapper_tx); } #[cfg(not(any(feature = "abciplus", feature = "abcipp")))] @@ -436,16 +430,6 @@ where .map_err(Error::TxApply) { Ok(result) => { - if let Some(wrapper) = embedding_wrapper { - // If transaction is decrypted remove the corresponding - // wrapper hash from storage which is not needed anymore - self.wl_storage - .write_log - .delete_tx_hash(wrapper.header_hash()) - .expect( - "Error while deleting tx hash key from storage", - ); - } if result.is_accepted() { if let EventType::Accepted = tx_event.event_type { // Wrapper transaction @@ -523,31 +507,20 @@ where // If transaction type is Decrypted and failed because of // out of gas, remove its hash from storage to allow // rewrapping it - if let Some(mut wrapper) = embedding_wrapper { + if let Some(wrapper) = embedding_wrapper { if let Error::TxApply(protocol::Error::GasError(_)) = msg { - let raw_header_hash = wrapper - .update_header(TxType::Raw) - .header_hash(); - self.wl_storage - .write_log - .delete_tx_hash(raw_header_hash) - .expect( - "Error while deleting tx hash key from \ - storage", - ); - } else { - // Remove the wrapper hash which is not needed - // anymore - self.wl_storage - .write_log - .delete_tx_hash(wrapper.header_hash()) - .expect( - "Error while deleting tx hash key from \ - storage", - ); + self.allow_tx_replay(wrapper); } + } else if let Some(wrapper) = wrapper { + // If transaction type was Wrapper and failed, write its + // hash to storage to prevent + // replay + self.wl_storage + .write_log + .write_tx_hash(wrapper.header_hash()) + .expect("Error 
while writing tx hash to storage"); } tx_event["gas_used"] = @@ -985,6 +958,20 @@ where } Ok(()) } + + // Allow to replay a specific wasm transaction. Needs as argument the + // corresponding wrapper transaction to avoid replay of that in the process + fn allow_tx_replay(&mut self, mut wrapper_tx: Tx) { + self.wl_storage + .write_log + .write_tx_hash(wrapper_tx.header_hash()) + .expect("Error while deleting tx hash from storage"); + + self.wl_storage + .write_log + .delete_tx_hash(wrapper_tx.update_header(TxType::Raw).header_hash()) + .expect("Error while deleting tx hash from storage"); + } } /// Convert ABCI vote info to PoS vote info. Any info which fails the conversion @@ -2295,11 +2282,7 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let wrapper_hash_key = - replay_protection::get_replay_protection_last_key( - &wrapper_tx.header_hash(), - ); - let mut decrypted_tx = wrapper_tx.clone(); + let mut decrypted_tx = wrapper_tx; decrypted_tx.update_header(TxType::Raw); let decrypted_hash_key = @@ -2331,14 +2314,7 @@ mod test_finalize_block { let root_post = shell.shell.wl_storage.storage.block.tree.root(); assert_eq!(root_pre.0, root_post.0); - // Check transactions' hashes in storage - assert!( - shell - .shell - .wl_storage - .write_log - .has_replay_protection_entry(&wrapper_tx.header_hash()) - ); + // Check transaction's hash in storage assert!( shell .shell @@ -2346,17 +2322,7 @@ mod test_finalize_block { .write_log .has_replay_protection_entry(&decrypted_tx.header_hash()) ); - // Check that non of the hashes is present in the merkle tree - assert!( - !shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap() - ); + // Check that the hash is present in the merkle tree assert!( !shell .shell @@ -2421,13 +2387,8 @@ mod test_finalize_block { .update_header(TxType::Raw) .header_hash(); - // Write wrapper and inner hashes in storage - for hash 
in [ - &decrypted_hash, - &wrapper_hash, - &decrypted_2_hash, - &wrapper_2_hash, - ] { + // Write inner hashes in storage + for hash in [&decrypted_hash, &decrypted_2_hash] { let hash_subkey = replay_protection::get_replay_protection_last_subkey(hash); shell @@ -2446,7 +2407,8 @@ mod test_finalize_block { info: "".into(), }, }]; - // Out of gas error triggering inner hash removal + // Out of gas error triggering inner hash removal and wrapper hash + // insert processed_txs.push(ProcessedTx { tx: decrypted_tx.to_bytes(), result: TxResult { @@ -2454,8 +2416,8 @@ mod test_finalize_block { info: "".into(), }, }); - // Wasm error that still leads to inner hash commitment and wrapper hash - // removal + // Wasm error that still leads to inner hash commitment and no wrapper + // hash insert processed_txs.push(ProcessedTx { tx: decrypted_tx_2.to_bytes(), result: TxResult { @@ -2526,9 +2488,8 @@ mod test_finalize_block { assert!( shell .wl_storage - .storage + .write_log .has_replay_protection_entry(&wrapper_hash) - .expect("test failed") ); assert!( shell diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index ddeecde52e..b00d75ff1f 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -164,9 +164,12 @@ where apply_protocol_tx(protocol_tx.tx, tx.data(), wl_storage) } TxType::Wrapper(ref wrapper) => { + let fee_unshielding_transaction = + get_fee_unshielding_transaction(&tx, wrapper); let changed_keys = apply_wrapper_tx( + tx, wrapper, - get_fee_unshielding_transaction(&tx, wrapper), + fee_unshielding_transaction, tx_bytes, ShellParams { tx_gas_meter, @@ -207,12 +210,14 @@ where } /// Performs the required operation on a wrapper transaction: -/// - replay protection /// - fee payment /// - gas accounting +/// - replay protection /// -/// Returns the set of changed storage keys. +/// Returns the set of changed storage keys. 
The caller should write the hash of +/// the wrapper header to storage in case of failure. pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( + mut tx: Tx, wrapper: &WrapperTx, fee_unshield_transaction: Option, tx_bytes: &[u8], @@ -226,17 +231,6 @@ where WLS: WriteLogAndStorage, { let mut changed_keys = BTreeSet::default(); - let mut tx: Tx = tx_bytes.try_into().unwrap(); - - // Writes wrapper tx hash - shell_params - .wl_storage - .write_log_mut() - .write_tx_hash(tx.header_hash()) - .expect("Error while writing tx hash to storage"); - changed_keys.insert(replay_protection::get_replay_protection_last_key( - &tx.header_hash(), - )); // Charge fee before performing any fallible operations charge_fee( @@ -257,7 +251,7 @@ where .write_tx_hash(tx.update_header(TxType::Raw).header_hash()) .expect("Error while writing tx hash to storage"); changed_keys.insert(replay_protection::get_replay_protection_last_key( - &tx.update_header(TxType::Raw).header_hash(), + &tx.header_hash(), )); Ok(changed_keys) diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index a766846916..0e45996ebf 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -122,6 +122,7 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit.to_owned()); protocol::apply_wrapper_tx( + tx.clone(), &wrapper, None, &request.data, From 37a66026afa2dc1c72b6af8901324d01fccbaaa3 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 13 Oct 2023 20:00:03 +0200 Subject: [PATCH 120/161] Fixes check for replay protection keys --- apps/src/lib/node/ledger/shell/finalize_block.rs | 6 ++++++ core/src/ledger/storage/wl_storage.rs | 5 +++-- core/src/ledger/storage/write_log.rs | 14 +++++++------- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 71801c817d..b0f2bc893c 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ 
b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2321,6 +2321,7 @@ mod test_finalize_block { .wl_storage .write_log .has_replay_protection_entry(&decrypted_tx.header_hash()) + .unwrap_or_default() ); // Check that the hash is present in the merkle tree assert!( @@ -2472,24 +2473,28 @@ mod test_finalize_block { .wl_storage .write_log .has_replay_protection_entry(&invalid_wrapper_hash) + .unwrap_or_default() ); assert!( !shell .wl_storage .write_log .has_replay_protection_entry(&decrypted_3_hash) + .unwrap_or_default() ); assert!( !shell .wl_storage .write_log .has_replay_protection_entry(&decrypted_hash) + .unwrap_or_default() ); assert!( shell .wl_storage .write_log .has_replay_protection_entry(&wrapper_hash) + .unwrap_or_default() ); assert!( shell @@ -2503,6 +2508,7 @@ mod test_finalize_block { .wl_storage .write_log .has_replay_protection_entry(&wrapper_2_hash) + .unwrap_or_default() ); } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index c987af1695..4e102e766b 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -62,8 +62,9 @@ where &self, hash: &Hash, ) -> Result { - if self.write_log.has_replay_protection_entry(hash) { - return Ok(true); + if let Some(present) = self.write_log.has_replay_protection_entry(hash) + { + return Ok(present); } self.storage.has_replay_protection_entry(hash) diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index e302baa9fe..4364028171 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -666,12 +666,12 @@ impl WriteLog { PrefixIter { iter } } - /// Check if the given tx hash has already been processed - pub fn has_replay_protection_entry(&self, hash: &Hash) -> bool { - match self.replay_protection.get(hash) { - Some(v) => !matches!(v, ReProtStorageModification::Delete), - None => false, - } + /// Check if the given tx hash has already been processed. 
Returns `None` if + /// the key is not known. + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Option { + self.replay_protection + .get(hash) + .map(|action| !matches!(action, ReProtStorageModification::Delete)) } /// Write the transaction hash @@ -714,7 +714,7 @@ impl WriteLog { /// Move the transaction hash of the previous block to the list of all /// blocks. This functions should be called at the beginning of the block - /// processing + /// processing, before any other replay protection operation is done pub fn finalize_tx_hash(&mut self, hash: Hash) -> Result<()> { if self .replay_protection From 914b6f5bd37648304c1bb028ceb5f5b4dbf5341c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 13 Oct 2023 20:00:34 +0200 Subject: [PATCH 121/161] Reworks replay protection check --- apps/src/lib/node/ledger/shell/mod.rs | 39 ++++++++----------- .../lib/node/ledger/shell/prepare_proposal.rs | 9 +++-- .../lib/node/ledger/shell/process_proposal.rs | 17 +++----- 3 files changed, 29 insertions(+), 36 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 81aa225655..d81446baef 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -929,47 +929,42 @@ where pub fn replay_protection_checks( &self, wrapper: &Tx, - tx_bytes: &[u8], temp_wl_storage: &mut TempWlStorage, ) -> Result<()> { - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let wrapper_hash = wrapper.header_hash(); if temp_wl_storage - .has_replay_protection_entry(&inner_tx_hash) - .expect("Error while checking inner tx hash key in storage") + .has_replay_protection_entry(&wrapper_hash) + .expect("Error while checking wrapper tx hash key in storage") { return Err(Error::ReplayAttempt(format!( - "Inner transaction hash {} already in storage", - &inner_tx_hash, + "Wrapper transaction hash {} already in storage", + wrapper_hash ))); } - // Write inner hash to tx WAL + // 
Write wrapper hash to tx WAL temp_wl_storage .write_log - .write_tx_hash(inner_tx_hash) - .expect("Couldn't write inner transaction hash to write log"); + .write_tx_hash(wrapper_hash) + .map_err(|e| Error::ReplayAttempt(e.to_string()))?; - let tx = - Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); - let wrapper_hash = tx.header_hash(); + let inner_tx_hash = + wrapper.clone().update_header(TxType::Raw).header_hash(); if temp_wl_storage - .has_replay_protection_entry(&wrapper_hash) - .expect("Error while checking wrapper tx hash key in storage") + .has_replay_protection_entry(&inner_tx_hash) + .expect("Error while checking inner tx hash key in storage") { return Err(Error::ReplayAttempt(format!( - "Wrapper transaction hash {} already in storage", - wrapper_hash + "Inner transaction hash {} already in storage", + &inner_tx_hash, ))); } - // Write wrapper hash to tx WAL + // Write inner hash to tx WAL temp_wl_storage .write_log - .write_tx_hash(wrapper_hash) - .expect("Couldn't write wrapper tx hash to write log"); - - Ok(()) + .write_tx_hash(inner_tx_hash) + .map_err(|e| Error::ReplayAttempt(e.to_string())) } /// If a handle to an Ethereum oracle was provided to the [`Shell`], attempt diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 103c90b982..20b9105875 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -245,8 +245,11 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit); tx_gas_meter.add_tx_size_gas(tx_bytes).map_err(|_| ())?; - // Check replay protection - self.replay_protection_checks(&tx, tx_bytes, temp_wl_storage) + // Check replay protection, safe to do here. 
Even if the tx is a + // replay attempt, we can leave its hashes in the write log since, + // having we already checked the signature, no other tx with the + // same hash can ba deemed valid + self.replay_protection_checks(&tx, temp_wl_storage) .map_err(|_| ())?; // Check fees @@ -1314,7 +1317,7 @@ mod test_prepare_proposal { let (shell, _recv, _, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); - let keypair_2 = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::albert_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 50950b60f0..6f1c7c83b9 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -878,11 +878,9 @@ where } } else { // Replay protection checks - if let Err(e) = self.replay_protection_checks( - &tx, - tx_bytes, - temp_wl_storage, - ) { + if let Err(e) = + self.replay_protection_checks(&tx, temp_wl_storage) + { return TxResult { code: ErrorCodes::ReplayTx.into(), info: e.to_string(), @@ -2227,12 +2225,9 @@ mod test_process_proposal { assert_eq!( response[1].result.info, format!( - "Transaction replay attempt: Inner transaction hash \ + "Transaction replay attempt: Wrapper transaction hash \ {} already in storage", - wrapper - .clone() - .update_header(TxType::Raw) - .header_hash(), + wrapper.header_hash(), ) ); } @@ -2311,7 +2306,7 @@ mod test_process_proposal { let (shell, _recv, _, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); - let keypair_2 = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::albert_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( From 673c99a1a391f1bfeeeb6fceb97970a953038005 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 
28 Aug 2023 19:10:11 +0200 Subject: [PATCH 122/161] Removes useless checks on decrypted tx, error codes and unit tests --- apps/src/lib/node/ledger/shell/mod.rs | 35 ++-- .../lib/node/ledger/shell/process_proposal.rs | 151 ------------------ tests/src/e2e/ledger_tests.rs | 4 +- 3 files changed, 16 insertions(+), 174 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index bd58e06af8..5c393ad85c 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -146,22 +146,19 @@ impl From for TxResult { #[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq, Eq)] pub enum ErrorCodes { Ok = 0, - InvalidDecryptedChainId = 1, - ExpiredDecryptedTx = 2, - DecryptedTxGasLimit = 3, - WasmRuntimeError = 4, - InvalidTx = 5, - InvalidSig = 6, - InvalidOrder = 7, - ExtraTxs = 8, - Undecryptable = 9, - AllocationError = 10, - ReplayTx = 11, - InvalidChainId = 12, - ExpiredTx = 13, - TxGasLimit = 14, - FeeError = 15, - InvalidVoteExtension = 16, + WasmRuntimeError = 1, + InvalidTx = 2, + InvalidSig = 3, + InvalidOrder = 4, + ExtraTxs = 5, + Undecryptable = 6, + AllocationError = 7, + ReplayTx = 8, + InvalidChainId = 9, + ExpiredTx = 10, + TxGasLimit = 11, + FeeError = 12, + InvalidVoteExtension = 13, } impl ErrorCodes { @@ -172,11 +169,7 @@ impl ErrorCodes { // NOTE: pattern match on all `ErrorCodes` variants, in order // to catch potential bugs when adding new codes match self { - Ok - | InvalidDecryptedChainId - | ExpiredDecryptedTx - | WasmRuntimeError - | DecryptedTxGasLimit => true, + Ok | WasmRuntimeError => true, InvalidTx | InvalidSig | InvalidOrder | ExtraTxs | Undecryptable | AllocationError | ReplayTx | InvalidChainId | ExpiredTx | TxGasLimit | FeeError | InvalidVoteExtension => false, diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 281bd04399..e79d3dba42 100644 --- 
a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -735,37 +735,6 @@ where wrapper.tx.clone(), privkey, ) { - // FIXME: remove these first 2 checks (also from - // prepare proposal if they are there and finalize - // block?) Tx chain id - if wrapper.tx.header.chain_id != self.chain_id { - return TxResult { - code: ErrorCodes::InvalidDecryptedChainId - .into(), - info: format!( - "Decrypted tx carries a wrong chain \ - id: expected {}, found {}", - self.chain_id, - wrapper.tx.header.chain_id - ), - }; - } - - // Tx expiration - if let Some(exp) = wrapper.tx.header.expiration { - if block_time > exp { - return TxResult { - code: ErrorCodes::ExpiredDecryptedTx - .into(), - info: format!( - "Decrypted tx expired at {:#?}, \ - block time: {:#?}", - exp, block_time - ), - }; - } - } - TxResult { code: ErrorCodes::Ok.into(), info: "Process Proposal accepted this \ @@ -2418,70 +2387,6 @@ mod test_process_proposal { } } - /// Test that a decrypted transaction with a mismatching chain id gets - /// rejected without rejecting the entire block - #[test] - fn test_decrypted_wrong_chain_id() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = crate::wallet::defaults::daewon_keypair(); - - let wrong_chain_id = ChainId("Wrong chain id".to_string()); - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: token::Amount::zero(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = wrong_chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper - .set_data(Data::new("new transaction data".as_bytes().to_owned())); - let mut decrypted = wrapper.clone(); - - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - decrypted.add_section(Section::Signature(Signature::new( - decrypted.sechashes(), 
- [(0, keypair)].into_iter().collect(), - None, - ))); - let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(); - let wrapper_in_queue = TxInQueue { - tx: wrapper, - gas: gas_limit, - }; - shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); - - // Run validation - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - - match shell.process_proposal(request) { - Ok(response) => { - assert_eq!( - response[0].result.code, - u32::from(ErrorCodes::InvalidDecryptedChainId) - ); - assert_eq!( - response[0].result.info, - format!( - "Decrypted tx carries a wrong chain id: expected {}, \ - found {}", - shell.chain_id, wrong_chain_id - ) - ) - } - Err(_) => panic!("Test failed"), - } - } - /// Test that an expired wrapper transaction causes a block rejection #[test] fn test_expired_wrapper() { @@ -2524,62 +2429,6 @@ mod test_process_proposal { } } - /// Test that an expired decrypted transaction is correctly marked as so - /// without rejecting the entire block - #[test] - fn test_expired_decrypted() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = crate::wallet::defaults::daewon_keypair(); - - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: token::Amount::zero(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.header.expiration = Some(DateTimeUtc::default()); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper - .set_data(Data::new("new transaction data".as_bytes().to_owned())); - let mut decrypted = wrapper.clone(); - - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - decrypted.add_section(Section::Signature(Signature::new( - decrypted.sechashes(), - [(0, keypair)].into_iter().collect(), 
- None, - ))); - let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(); - let wrapper_in_queue = TxInQueue { - tx: wrapper, - gas: gas_limit, - }; - shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); - - // Run validation - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - match shell.process_proposal(request) { - Ok(response) => { - assert_eq!(response.len(), 1); - assert_eq!( - response[0].result.code, - u32::from(ErrorCodes::ExpiredDecryptedTx) - ); - } - Err(_) => panic!("Test failed"), - } - } - /// Check that a tx requiring more gas than the block limit causes a block /// rejection #[test] diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 2f5bbe4ea7..92edce853a 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -986,7 +986,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Transaction accepted")?; client.exp_string("Transaction applied")?; client.exp_string("Transaction is invalid")?; - client.exp_string(r#""code": "5"#)?; + client.exp_string(r#""code": "2"#)?; client.assert_success(); let mut ledger = bg_ledger.foreground(); @@ -1042,7 +1042,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Error trying to apply a transaction")?; - client.exp_string(r#""code": "4"#)?; + client.exp_string(r#""code": "1"#)?; client.assert_success(); Ok(()) From 39c9b43af75bd1433843d60c208ddac057d55ebc Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 29 Aug 2023 12:04:13 +0200 Subject: [PATCH 123/161] Adds raw header signature in tests --- core/src/types/transaction/mod.rs | 2 +- shared/src/ledger/native_vp/ibc/mod.rs | 151 ++++++++++++---------- tests/src/vm_host_env/mod.rs | 2 + wasm/wasm_source/src/vp_implicit.rs | 58 ++++++--- wasm/wasm_source/src/vp_testnet_faucet.rs | 18 ++- wasm/wasm_source/src/vp_user.rs | 100 ++++++++------ 
wasm/wasm_source/src/vp_validator.rs | 100 ++++++++------ 7 files changed, 261 insertions(+), 170 deletions(-) diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 8acb9e6c7e..0102a228eb 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -234,7 +234,7 @@ mod test_process_tx { .set_data(Data::new("transaction data".as_bytes().to_owned())) .clone(); tx.add_section(Section::Signature(Signature::new( - vec![*tx.code_sechash(), *tx.data_sechash()], + vec![tx.raw_header_hash(), *tx.code_sechash(), *tx.data_sechash()], [(0, gen_keypair())].into_iter().collect(), None, ))); diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 3b6521905b..94fca6063e 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -244,8 +244,8 @@ pub fn get_dummy_header() -> crate::types::storage::Header { /// A dummy validator used for testing #[cfg(any(test, feature = "testing"))] -pub fn get_dummy_genesis_validator() --> namada_proof_of_stake::types::GenesisValidator { +pub fn get_dummy_genesis_validator( +) -> namada_proof_of_stake::types::GenesisValidator { use crate::core::types::address::testing::established_address_1; use crate::core::types::dec::Dec; use crate::core::types::key::testing::common_sk_from_simple_seed; @@ -724,7 +724,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -742,10 +746,9 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + 
.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -950,10 +953,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1037,7 +1039,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1061,10 +1067,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1284,10 +1289,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1371,7 +1375,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1394,10 +1402,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + 
assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1459,7 +1466,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1482,10 +1493,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1584,7 +1594,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1607,10 +1621,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1708,7 +1721,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1731,10 +1748,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + 
assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1817,7 +1833,11 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![ + outer_tx.header_hash(), + *outer_tx.code_sechash(), + *outer_tx.data_sechash(), + ], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1840,10 +1860,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1944,10 +1963,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } // skip test_close_init_channel() and test_close_confirm_channel() since it @@ -2086,10 +2104,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -2274,10 +2291,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -2421,10 +2437,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -2572,10 +2587,9 @@ mod tests { vp_wasm_cache, 
); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -2724,9 +2738,8 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed")); } } diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 68ebd76dff..a5ef2d54f1 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -476,6 +476,7 @@ mod tests { signed_tx_data .verify_signatures( &[ + signed_tx_data.header_hash(), *signed_tx_data.data_sechash(), *signed_tx_data.code_sechash(), ], @@ -495,6 +496,7 @@ mod tests { signed_tx_data .verify_signatures( &[ + signed_tx_data.header_hash(), *signed_tx_data.data_sechash(), *signed_tx_data.code_sechash(), ], diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 215dccf421..3c6ec99c26 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -536,7 +536,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -547,10 +547,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a debit transfer without a valid signature is rejected. @@ -672,7 +676,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -684,10 +688,14 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a transfer on with accounts other than self is accepted. @@ -756,8 +764,8 @@ mod tests { /// Generates a keypair, derive an implicit address from it and generate /// a storage key inside its storage. - fn arb_account_storage_subspace_key() - -> impl Strategy { + fn arb_account_storage_subspace_key( + ) -> impl Strategy { // Generate a keypair key::testing::arb_common_keypair().prop_flat_map(|sk| { let pk = sk.ref_to(); @@ -943,10 +951,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(!validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } #[test] @@ -988,7 +1000,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -998,9 +1010,13 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 7298c0b126..091af51372 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -267,7 +267,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -277,10 +277,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } prop_compose! { @@ -400,7 +404,7 @@ mod tests { tx_data.set_data(Data::new(solution_bytes)); tx_data.set_code(Code::new(vec![])); tx_data.add_section(Section::Signature(Signature::new( - vec![*tx_data.data_sechash(), *tx_data.code_sechash()], + vec![tx_data.raw_header_hash(), *tx_data.data_sechash(), *tx_data.code_sechash()], [(0, target_key)].into_iter().collect(), None, ))); @@ -454,7 +458,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index a334576b53..5bb0593563 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -393,7 +393,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -403,10 +403,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a PoS action that must be authorized is rejected without a @@ -562,7 +566,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -572,10 +576,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a transfer on with accounts other than self is accepted. @@ -724,7 +732,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -811,7 +819,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -821,10 +829,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a validity predicate update is rejected if not whitelisted @@ -866,7 +878,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -876,10 +888,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(!validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a validity predicate update is accepted if whitelisted @@ -922,7 +938,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -932,10 +948,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a tx is rejected if not whitelisted @@ -978,7 +998,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -988,10 +1008,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(!validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } #[test] @@ -1034,7 +1058,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1044,9 +1068,13 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } } diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index f929a8a0d1..29e0c6f3d2 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -400,7 +400,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -410,10 +410,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a PoS action that must be authorized is rejected without a @@ -580,7 +584,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -590,10 +594,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a transfer on with accounts other than self is accepted. @@ -742,7 +750,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -828,7 +836,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -838,10 +846,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a validity predicate update is rejected if not whitelisted @@ -883,7 +895,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -893,10 +905,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(!validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a validity predicate update is accepted if whitelisted @@ -939,7 +955,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -949,10 +965,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } /// Test that a tx is rejected if not whitelisted @@ -995,7 +1015,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1005,10 +1025,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(!validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } #[test] @@ -1051,7 +1075,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1061,9 +1085,13 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } } From 3dbd59a5056285d2fb676b16d8605b28c8a5ddf9 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 29 Aug 2023 20:57:55 +0200 Subject: [PATCH 124/161] Client signs the raw transaction header --- core/src/proto/types.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index b5935e504b..5bd16d61cb 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -1494,7 +1494,8 @@ impl Tx { public_keys_index_map: &AccountPublicKeysMap, signer: Option
, ) -> Vec { - let targets = self.inner_section_targets(); + let mut targets = vec![self.header_hash()]; + targets.extend(self.inner_section_targets()); let mut signatures = Vec::new(); let section = Signature::new( targets, From bf5ca7aa137bd1b92e436e7756b9104959717076 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 6 Oct 2023 11:07:01 +0100 Subject: [PATCH 125/161] Mock ledger services in integration tests This enables processing Ethereum events, broadcasting protocol transactions, among other things, while running integration tests. --- .../lib/node/ledger/ethereum_oracle/mod.rs | 141 +++++--- .../ledger/ethereum_oracle/test_tools/mod.rs | 20 +- .../src/lib/node/ledger/shell/testing/node.rs | 306 ++++++++++++++++-- tests/src/integration/masp.rs | 17 +- tests/src/integration/setup.rs | 54 +++- 5 files changed, 430 insertions(+), 108 deletions(-) diff --git a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index 6980778c07..6c5a251a4e 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -7,6 +7,7 @@ use std::ops::ControlFlow; use async_trait::async_trait; use ethabi::Address; use ethbridge_events::{event_codecs, EventKind}; +use itertools::Either; use namada::core::hints; use namada::core::types::ethereum_structs; use namada::eth_bridge::ethers; @@ -75,13 +76,6 @@ pub trait RpcClient { /// Ethereum event log. type Log: IntoEthAbiLog; - /// Whether we should stop running the Ethereum oracle - /// if a call to [`Self::check_events_in_block`] fails. - /// - /// This is only useful for testing purposes. In general, - /// no implementation should override this constant. - const EXIT_ON_EVENTS_FAILURE: bool = true; - /// Instantiate a new client, pointing to the /// given RPC url. 
fn new_client(rpc_url: &str) -> Self @@ -108,6 +102,10 @@ pub trait RpcClient { backoff: Duration, deadline: Instant, ) -> Result; + + /// Given its current state, check if this RPC client + /// may recover from the given [`enum@Error`]. + fn may_recover(&self, error: &Error) -> bool; } #[async_trait(?Send)] @@ -172,6 +170,14 @@ impl RpcClient for Provider { }, } } + + #[inline(always)] + fn may_recover(&self, error: &Error) -> bool { + !matches!( + error, + Error::Timeout | Error::Channel(_, _) | Error::CheckEvents(_, _, _) + ) + } } /// A client that can talk to geth and parse @@ -197,7 +203,7 @@ impl Oracle { /// Construct a new [`Oracle`]. Note that it can not do anything until it /// has been sent a configuration via the passed in `control` channel. pub fn new( - url: &str, + client_or_url: Either, sender: BoundedSender, last_processed_block: last_processed_block::Sender, backoff: Duration, @@ -205,7 +211,10 @@ impl Oracle { control: control::Receiver, ) -> Self { Self { - client: C::new_client(url), + client: match client_or_url { + Either::Left(client) => client, + Either::Right(url) => C::new_client(url), + }, sender, backoff, ceiling, @@ -275,7 +284,7 @@ pub fn run_oracle( tracing::info!(?url, "Ethereum event oracle is starting"); let oracle = Oracle::::new( - &url, + Either::Right(&url), sender, last_processed_block, DEFAULT_BACKOFF, @@ -300,6 +309,75 @@ pub fn run_oracle( .with_no_cleanup() } +/// Determine what action to take after attempting to +/// process events contained in an Ethereum block. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ProcessEventAction { + /// No events could be processed at this time, so we must keep + /// polling for new events. + ContinuePollingEvents, + /// Some error occurred while processing Ethereum events in + /// the current height. We must halt the oracle. + HaltOracle, + /// The current Ethereum block height has been processed. + /// We must advance to the next Ethereum height. 
+ ProceedToNextBlock, +} + +impl ProcessEventAction { + /// Check whether the action commands a new block to be processed. + #[inline] + pub fn process_new_block(&self) -> bool { + matches!(self, Self::ProceedToNextBlock) + } +} + +impl ProcessEventAction { + /// Handles the requested oracle action, translating it to a format + /// understood by the set of [`Sleep`] abstractions. + fn handle(self) -> ControlFlow, ()> { + match self { + ProcessEventAction::ContinuePollingEvents => { + ControlFlow::Continue(()) + } + ProcessEventAction::HaltOracle => ControlFlow::Break(Err(())), + ProcessEventAction::ProceedToNextBlock => { + ControlFlow::Break(Ok(())) + } + } + } +} + +/// Tentatively process a batch of Ethereum events. +pub(crate) async fn try_process_eth_events( + oracle: &Oracle, + config: &Config, + next_block_to_process: ðereum_structs::BlockHeight, +) -> ProcessEventAction { + process_events_in_block(next_block_to_process, oracle, config) + .await + .map_or_else( + |error| { + if oracle.client.may_recover(&error) { + tracing::debug!( + %error, + block = ?next_block_to_process, + "Error while trying to process Ethereum block" + ); + ProcessEventAction::ContinuePollingEvents + } else { + tracing::error!( + reason = %error, + block = ?next_block_to_process, + "The Ethereum oracle has disconnected" + ); + ProcessEventAction::HaltOracle + } + }, + |()| ProcessEventAction::ProceedToNextBlock, + ) +} + /// Given an oracle, watch for new Ethereum events, processing /// them into Namada native types. /// @@ -334,43 +412,8 @@ async fn run_oracle_aux(mut oracle: Oracle) { ); let res = Sleep { strategy: Constant(oracle.backoff) }.run(|| async { tokio::select! 
{ - result = process(&oracle, &config, next_block_to_process.clone()) => { - match result { - Ok(()) => { - ControlFlow::Break(Ok(())) - }, - Err( - reason @ ( - Error::Timeout - | Error::Channel(_, _) - | Error::CheckEvents(_, _, _) - ) - ) => { - // the oracle is unresponsive, we don't want the test to end - if !C::EXIT_ON_EVENTS_FAILURE - && matches!(&reason, Error::CheckEvents(_, _, _)) - { - tracing::debug!("Allowing the Ethereum oracle to keep running"); - return ControlFlow::Continue(()); - } - tracing::error!( - %reason, - block = ?next_block_to_process, - "The Ethereum oracle has disconnected" - ); - ControlFlow::Break(Err(())) - } - Err(error) => { - // this is a recoverable error, hence the debug log, - // to avoid spamming info logs - tracing::debug!( - %error, - block = ?next_block_to_process, - "Error while trying to process Ethereum block" - ); - ControlFlow::Continue(()) - } - } + action = try_process_eth_events(&oracle, &config, &next_block_to_process) => { + action.handle() }, _ = oracle.sender.closed() => { tracing::info!( @@ -400,10 +443,10 @@ async fn run_oracle_aux(mut oracle: Oracle) { /// Checks if the given block has any events relating to the bridge, and if so, /// sends them to the oracle's `sender` channel -async fn process( +async fn process_events_in_block( + block_to_process: ðereum_structs::BlockHeight, oracle: &Oracle, config: &Config, - block_to_process: ethereum_structs::BlockHeight, ) -> Result<(), Error> { let mut queue: Vec = vec![]; let pending = &mut queue; diff --git a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs index 9a2454be17..0479445b7f 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs @@ -57,7 +57,7 @@ pub mod event_log { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod mock_web3_client { use std::borrow::Cow; use std::fmt::Debug; @@ -102,7 
+102,7 @@ pub mod mock_web3_client { /// reason is for interior mutability. pub struct Web3Client(Arc>); - /// Command sender for [`Web3`] instances. + /// Command sender for [`TestOracle`] instances. pub struct Web3Controller(Arc>); impl Web3Controller { @@ -148,8 +148,6 @@ pub mod mock_web3_client { impl RpcClient for Web3Client { type Log = ethabi::RawLog; - const EXIT_ON_EVENTS_FAILURE: bool = false; - #[cold] fn new_client(_: &str) -> Self where @@ -184,14 +182,15 @@ pub mod mock_web3_client { } if client.last_block_processed.as_ref() < Some(&block_to_check) { - client - .blocks_processed - .send(block_to_check.clone()) - .unwrap(); + _ = client.blocks_processed.send(block_to_check.clone()); client.last_block_processed = Some(block_to_check); } Ok(logs) } else { + tracing::debug!( + "No events to be processed by the Test Ethereum oracle, \ + as it has been artificially set as unresponsive" + ); Err(Error::CheckEvents( ty.into(), addr, @@ -209,6 +208,11 @@ pub mod mock_web3_client { let height = self.0.lock().unwrap().latest_block_height.clone(); Ok(SyncStatus::AtHeight(height)) } + + #[inline(always)] + fn may_recover(&self, _: &Error) -> bool { + true + } } impl Web3Client { diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs b/apps/src/lib/node/ledger/shell/testing/node.rs index 034ac80845..9b66b750c2 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -1,11 +1,16 @@ +use std::future::poll_fn; use std::mem::ManuallyDrop; use std::path::PathBuf; use std::str::FromStr; use std::sync::{Arc, Mutex}; +use std::task::Poll; use color_eyre::eyre::{Report, Result}; use data_encoding::HEXUPPER; +use itertools::Either; use lazy_static::lazy_static; +use namada::core::types::ethereum_structs; +use namada::eth_bridge::oracle::config::Config as OracleConfig; use namada::ledger::events::log::dumb_queries; use namada::ledger::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, @@ 
-23,13 +28,15 @@ use namada::sdk::queries::Client; use namada::tendermint_proto::abci::VoteInfo; use namada::tendermint_rpc::endpoint::abci_info; use namada::tendermint_rpc::SimpleRequest; +use namada::types::control_flow::time::Duration; +use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::{BlockHash, BlockHeight, Epoch, Header}; use namada::types::time::DateTimeUtc; use num_traits::cast::FromPrimitive; use regex::Regex; -use tokio::sync::mpsc::UnboundedReceiver; +use tokio::sync::mpsc; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::{ @@ -38,14 +45,193 @@ use crate::facade::tendermint_proto::abci::{ use crate::facade::tendermint_rpc::endpoint::abci_info::AbciInfo; use crate::facade::tendermint_rpc::error::Error as RpcError; use crate::facade::{tendermint, tendermint_rpc}; +use crate::node::ledger::ethereum_oracle::test_tools::mock_web3_client::{ + TestOracle, Web3Client, Web3Controller, +}; +use crate::node::ledger::ethereum_oracle::{ + control, last_processed_block, try_process_eth_events, +}; use crate::node::ledger::shell::testing::utils::TestDir; -use crate::node::ledger::shell::{ErrorCodes, Shell}; +use crate::node::ledger::shell::{ErrorCodes, EthereumOracleChannels, Shell}; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; use crate::node::ledger::storage; +/// Mock services data returned by [`mock_services`]. +pub struct MockServicesPackage { + /// Whether to automatically drive mock services or not. + pub auto_drive_services: bool, + /// Mock services stored by the [`MockNode`]. + pub services: MockServices, + /// Handlers to mock services stored by the [`Shell`]. 
+ pub shell_handlers: MockServiceShellHandlers, + /// Handler to the mock services controller. + pub controller: MockServicesController, +} + +/// Mock services config. +pub struct MockServicesCfg { + /// Whether to automatically drive mock services or not. + pub auto_drive_services: bool, + /// Whether to enable the Ethereum oracle or not. + pub enable_eth_oracle: bool, +} + +/// Instantiate mock services for a node. +pub fn mock_services(cfg: MockServicesCfg) -> MockServicesPackage { + let (_, eth_client) = Web3Client::setup(); + let (eth_sender, eth_receiver) = mpsc::channel(1000); + let (last_processed_block_sender, last_processed_block_receiver) = + last_processed_block::channel(); + let (control_sender, control_receiver) = control::channel(); + let eth_oracle_controller = eth_client.controller(); + let oracle = TestOracle::new( + Either::Left(eth_client), + eth_sender.clone(), + last_processed_block_sender, + Duration::from_millis(5), + Duration::from_secs(30), + control_receiver, + ); + let eth_oracle_channels = EthereumOracleChannels::new( + eth_receiver, + control_sender, + last_processed_block_receiver, + ); + let (tx_broadcaster, tx_receiver) = mpsc::unbounded_channel(); + let ethereum_oracle = MockEthOracle { + oracle, + config: Default::default(), + next_block_to_process: tokio::sync::RwLock::new(Default::default()), + }; + MockServicesPackage { + auto_drive_services: cfg.auto_drive_services, + services: MockServices { + ethereum_oracle, + tx_receiver: tokio::sync::Mutex::new(tx_receiver), + }, + shell_handlers: MockServiceShellHandlers { + tx_broadcaster: tx_broadcaster.clone(), + eth_oracle_channels: cfg + .enable_eth_oracle + .then_some(eth_oracle_channels), + }, + controller: MockServicesController { + eth_oracle: eth_oracle_controller, + eth_events: eth_sender, + tx_broadcaster, + }, + } +} + +/// Controller of various mock node services. +pub struct MockServicesController { + /// Ethereum oracle controller. 
+ pub eth_oracle: Web3Controller, + /// Handler to the Ethereum oracle sender channel. + /// + /// Bypasses the Ethereum oracle service and sends + /// events directly to the [`Shell`]. + pub eth_events: mpsc::Sender, + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, +} + +/// Service handlers to be passed to a [`Shell`], when building +/// a mock node. +pub struct MockServiceShellHandlers { + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, + /// Ethereum oracle channel handlers. + pub eth_oracle_channels: Option, +} + +/// Services mocking the operation of the ledger's various async tasks. +pub struct MockServices { + /// Receives transactions that are supposed to be broadcasted + /// to the network. + tx_receiver: tokio::sync::Mutex>>, + /// Mock Ethereum oracle, that processes blocks from Ethereum + /// in order to find events emitted by a transaction to vote on. + ethereum_oracle: MockEthOracle, +} + +/// Actions to be performed by the mock node, as a result +/// of driving [`MockServices`]. +pub enum MockServiceAction { + /// The ledger should broadcast new transactions. + BroadcastTxs(Vec>), + /// Progress to the next Ethereum block to process. + IncrementEthHeight, +} + +impl MockServices { + /// Drive the internal state machine of the mock node's services. + async fn drive(&self) -> Vec { + let mut actions = vec![]; + + // process new eth events + // NOTE: this may result in a deadlock, if the events + // sent to the shell exceed the capacity of the oracle's + // events channel! 
+ if self.ethereum_oracle.drive().await { + actions.push(MockServiceAction::IncrementEthHeight); + } + + // receive txs from the broadcaster + let txs = { + let mut txs = vec![]; + let mut tx_receiver = self.tx_receiver.lock().await; + + while let Some(tx) = poll_fn(|cx| match tx_receiver.poll_recv(cx) { + Poll::Pending => Poll::Ready(None), + poll => poll, + }) + .await + { + txs.push(tx); + } + + txs + }; + if !txs.is_empty() { + actions.push(MockServiceAction::BroadcastTxs(txs)); + } + + actions + } +} + +/// Mock Ethereum oracle used for testing purposes. +struct MockEthOracle { + /// The inner oracle. + oracle: TestOracle, + /// The inner oracle's configuration. + config: OracleConfig, + /// The inner oracle's next block to process. + next_block_to_process: tokio::sync::RwLock, +} + +impl MockEthOracle { + /// Updates the state of the Ethereum oracle. + /// + /// This includes sending any confirmed Ethereum events to + /// the shell and updating the height of the next Ethereum + /// block to process. Upon a successfully processed block, + /// this functions returns `true`. 
+ async fn drive(&self) -> bool { + try_process_eth_events( + &self.oracle, + &self.config, + &*self.next_block_to_process.read().await, + ) + .await + .process_new_block() + } +} + /// Status of tx #[derive(Debug, Clone, PartialEq, Eq)] pub enum NodeResults { @@ -61,8 +247,9 @@ pub struct MockNode { pub shell: Arc>>, pub test_dir: ManuallyDrop, pub keep_temp: bool, - pub _broadcast_recv: UnboundedReceiver>, pub results: Arc>>, + pub services: Arc, + pub auto_drive_services: bool, } impl Drop for MockNode { @@ -82,6 +269,34 @@ impl Drop for MockNode { } impl MockNode { + pub async fn handle_service_action(&self, action: MockServiceAction) { + match action { + MockServiceAction::BroadcastTxs(txs) => { + self.submit_txs(txs); + } + MockServiceAction::IncrementEthHeight => { + *self + .services + .ethereum_oracle + .next_block_to_process + .write() + .await += 1.into(); + } + } + } + + pub async fn drive_mock_services(&self) { + for action in self.services.drive().await { + self.handle_service_action(action).await; + } + } + + async fn drive_mock_services_bg(&self) { + if self.auto_drive_services { + self.drive_mock_services().await; + } + } + pub fn genesis_dir(&self) -> PathBuf { self.test_dir .path() @@ -179,20 +394,43 @@ impl MockNode { pub fn finalize_and_commit(&self) { let (proposer_address, votes) = self.prepare_request(); - let mut req = FinalizeBlock { - hash: BlockHash([0u8; 32]), - header: Header { - hash: Hash([0; 32]), - time: DateTimeUtc::now(), - next_validators_hash: Hash([0; 32]), - }, - byzantine_validators: vec![], - txs: vec![], - proposer_address, - votes, - }; - req.header.time = DateTimeUtc::now(); let mut locked = self.shell.lock().unwrap(); + + // build finalize block abci request + let req = { + // check if we have protocol txs to be included + // in the finalize block request + let txs = { + let req = RequestPrepareProposal { + proposer_address: proposer_address.clone(), + ..Default::default() + }; + let txs = 
locked.prepare_proposal(req).txs; + + txs.into_iter() + .map(|tx| ProcessedTx { + tx, + result: TxResult { + code: 0, + info: String::new(), + }, + }) + .collect() + }; + FinalizeBlock { + hash: BlockHash([0u8; 32]), + header: Header { + hash: Hash([0; 32]), + time: DateTimeUtc::now(), + next_validators_hash: Hash([0; 32]), + }, + byzantine_validators: vec![], + txs, + proposer_address, + votes, + } + }; + locked.finalize_block(req).expect("Test failed"); locked.commit(); } @@ -213,19 +451,19 @@ impl MockNode { /// Send a tx through Process Proposal and Finalize Block /// and register the results. - fn submit_tx(&self, tx_bytes: Vec) { - // The block space allocator disallows txs in certain blocks. + fn submit_txs(&self, txs: Vec>) { + // The block space allocator disallows encrypted txs in certain blocks. // Advance to block height that allows txs. self.advance_to_allowed_block(); let (proposer_address, votes) = self.prepare_request(); let req = RequestProcessProposal { - txs: vec![tx_bytes.clone()], + txs: txs.clone(), proposer_address: proposer_address.clone(), ..Default::default() }; let mut locked = self.shell.lock().unwrap(); - let mut result = locked.process_proposal(req); + let result = locked.process_proposal(req); let mut errors: Vec<_> = result .tx_results @@ -252,10 +490,11 @@ impl MockNode { next_validators_hash: Hash([0; 32]), }, byzantine_validators: vec![], - txs: vec![ProcessedTx { - tx: tx_bytes, - result: result.tx_results.remove(0), - }], + txs: txs + .into_iter() + .zip(result.tx_results.into_iter()) + .map(|(tx, result)| ProcessedTx { tx, result }) + .collect(), proposer_address, votes, }; @@ -322,6 +561,7 @@ impl<'a> Client for &'a MockNode { height: Option, prove: bool, ) -> std::result::Result { + self.drive_mock_services_bg().await; let rpc = RPC; let data = data.unwrap_or_default(); let latest_height = { @@ -367,6 +607,7 @@ impl<'a> Client for &'a MockNode { /// `/abci_info`: get information about the ABCI application. 
async fn abci_info(&self) -> Result { + self.drive_mock_services_bg().await; let locked = self.shell.lock().unwrap(); Ok(AbciInfo { data: "Namada".to_string(), @@ -398,6 +639,7 @@ impl<'a> Client for &'a MockNode { tx: namada::tendermint::abci::Transaction, ) -> Result { + self.drive_mock_services_bg().await; let mut resp = tendermint_rpc::endpoint::broadcast::tx_sync::Response { code: Default::default(), data: Default::default(), @@ -405,9 +647,10 @@ impl<'a> Client for &'a MockNode { hash: tendermint::abci::transaction::Hash::new([0; 32]), }; let tx_bytes: Vec = tx.into(); - self.submit_tx(tx_bytes); + self.submit_txs(vec![tx_bytes]); if !self.success() { - resp.code = tendermint::abci::Code::Err(1337); // TODO: submit_tx should return the correct error code + message + // TODO: submit_txs should return the correct error code + message + resp.code = tendermint::abci::Code::Err(1337); return Ok(resp); } else { self.clear_results(); @@ -417,11 +660,13 @@ impl<'a> Client for &'a MockNode { proposer_address, ..Default::default() }; - let tx_bytes = { + let txs = { let locked = self.shell.lock().unwrap(); - locked.prepare_proposal(req).txs.remove(0) + locked.prepare_proposal(req).txs }; - self.submit_tx(tx_bytes); + if !txs.is_empty() { + self.submit_txs(txs); + } Ok(resp) } @@ -434,6 +679,7 @@ impl<'a> Client for &'a MockNode { _order: namada::tendermint_rpc::Order, ) -> Result { + self.drive_mock_services_bg().await; let matcher = parse_tm_query(query); let borrowed = self.shell.lock().unwrap(); // we store an index into the event log as a block @@ -503,6 +749,7 @@ impl<'a> Client for &'a MockNode { where H: Into + Send, { + self.drive_mock_services_bg().await; let height = height.into(); let encoded_event = EncodedEvent(height.value()); let locked = self.shell.lock().unwrap(); @@ -561,6 +808,7 @@ impl<'a> Client for &'a MockNode { /// Returns empty result (200 OK) on success, no response in case of an /// error. 
async fn health(&self) -> Result<(), RpcError> { + self.drive_mock_services_bg().await; Ok(()) } } diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index ecd1b34465..d0a3121ce4 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -33,7 +33,7 @@ fn masp_incentives() -> Result<()> { // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; // Wait till epoch boundary let ep0 = node.next_epoch(); // Send 20 BTC from Albert to PA @@ -767,7 +767,7 @@ fn masp_pinned_txs() -> Result<()> { // Download the shielded pool parameters before starting node let _ = CLIShieldedUtils::new::(PathBuf::new()); - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; // Wait till epoch boundary let _ep0 = node.next_epoch(); @@ -935,7 +935,7 @@ fn masp_txs_and_queries() -> Result<()> { Err(&'static str), } - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; _ = node.next_epoch(); let txs_args = vec![ // 0. Attempt to spend 10 BTC at SK(A) to PA(B) @@ -1230,7 +1230,7 @@ fn masp_txs_and_queries() -> Result<()> { /// 3. Submit a new wrapper with an invalid unshielding tx and assert the /// failure #[test] -fn wrapper_fee_unshielding() { +fn wrapper_fee_unshielding() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node @@ -1238,7 +1238,7 @@ fn wrapper_fee_unshielding() { // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. - let mut node = setup::setup().unwrap(); + let (mut node, _services) = setup::setup()?; _ = node.next_epoch(); // 1. 
Shield some tokens @@ -1262,8 +1262,7 @@ fn wrapper_fee_unshielding() { "--ledger-address", validator_one_rpc, ], - ) - .unwrap(); + )?; node.assert_success(); _ = node.next_epoch(); @@ -1288,8 +1287,7 @@ fn wrapper_fee_unshielding() { "--ledger-address", validator_one_rpc, ], - ) - .unwrap(); + )?; node.assert_success(); // 3. Invalid unshielding @@ -1320,4 +1318,5 @@ fn wrapper_fee_unshielding() { .is_err(); assert!(tx_run); + Ok(()) } diff --git a/tests/src/integration/setup.rs b/tests/src/integration/setup.rs index df74c5f6f1..9bbd00eef9 100644 --- a/tests/src/integration/setup.rs +++ b/tests/src/integration/setup.rs @@ -11,10 +11,13 @@ use namada_apps::config::genesis::genesis_config::GenesisConfig; use namada_apps::config::TendermintMode; use namada_apps::facade::tendermint::Timeout; use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; -use namada_apps::node::ledger::shell::testing::node::MockNode; +use namada_apps::node::ledger::shell::testing::node::{ + mock_services, MockNode, MockServicesCfg, MockServicesController, + MockServicesPackage, +}; use namada_apps::node::ledger::shell::testing::utils::TestDir; use namada_apps::node::ledger::shell::Shell; -use namada_core::types::address::Address; +use namada_core::types::address::nam; use namada_core::types::chain::{ChainId, ChainIdPrefix}; use toml::value::Table; @@ -26,14 +29,14 @@ use crate::e2e::setup::{ const ENV_VAR_KEEP_TEMP: &str = "NAMADA_INT_KEEP_TEMP"; /// Setup a network with a single genesis validator node. -pub fn setup() -> Result { +pub fn setup() -> Result<(MockNode, MockServicesController)> { initialize_genesis(|genesis| genesis) } /// Setup folders with genesis, configs, wasm, etc. 
pub fn initialize_genesis( mut update_genesis: impl FnMut(GenesisConfig) -> GenesisConfig, -) -> Result { +) -> Result<(MockNode, MockServicesController)> { let working_dir = std::fs::canonicalize("..").unwrap(); let keep_temp = match std::env::var(ENV_VAR_KEEP_TEMP) { Ok(val) => val.to_ascii_lowercase() != "false", @@ -81,7 +84,23 @@ pub fn initialize_genesis( }, ); - create_node(test_dir, &genesis, keep_temp) + let auto_drive_services = { + // NB: for now, the only condition that + // dictates whether mock services should + // be enabled is if the Ethereum bridge + // is enabled at genesis + genesis.ethereum_bridge_params.is_some() + }; + let enable_eth_oracle = { + // NB: we only enable the oracle if the + // Ethereum bridge is enabled at genesis + genesis.ethereum_bridge_params.is_some() + }; + let services_cfg = MockServicesCfg { + auto_drive_services, + enable_eth_oracle, + }; + create_node(test_dir, &genesis, keep_temp, services_cfg) } /// Create a mock ledger node. @@ -89,7 +108,8 @@ fn create_node( base_dir: TestDir, genesis: &GenesisConfig, keep_temp: bool, -) -> Result { + services_cfg: MockServicesCfg, +) -> Result<(MockNode, MockServicesController)> { // look up the chain id from the global file. let chain_id = if let toml::Value::String(chain_id) = toml::from_str::
( @@ -119,26 +139,32 @@ fn create_node( ); // instantiate and initialize the ledger node. - let (sender, recv) = tokio::sync::mpsc::unbounded_channel(); + let MockServicesPackage { + auto_drive_services, + services, + shell_handlers, + controller, + } = mock_services(services_cfg); let node = MockNode { shell: Arc::new(Mutex::new(Shell::new( config::Ledger::new( base_dir.path(), chain_id.clone(), - TendermintMode::Validator + TendermintMode::Validator, ), wasm_dir, - sender, - None, + shell_handlers.tx_broadcaster, + shell_handlers.eth_oracle_channels, None, 50 * 1024 * 1024, // 50 kiB 50 * 1024 * 1024, // 50 kiB - Address::from_str("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), + nam(), ))), test_dir: ManuallyDrop::new(base_dir), keep_temp, - _broadcast_recv: recv, + services: Arc::new(services), results: Arc::new(Mutex::new(vec![])), + auto_drive_services, }; let init_req = namada_apps::facade::tower_abci::request::InitChain { time: Some(Timestamp { @@ -156,8 +182,10 @@ fn create_node( locked .init_chain(init_req, 1) .map_err(|e| eyre!("Failed to initialize ledger: {:?}", e))?; + // set the height of the first block (should be 1) + locked.wl_storage.storage.block.height = 1.into(); locked.commit(); } - Ok(node) + Ok((node, controller)) } From aed3c8fc027e4b516754085217c7de75144b88f0 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 16 Oct 2023 13:22:54 +0100 Subject: [PATCH 126/161] Improve reading flow of mock node service defs --- .../src/lib/node/ledger/shell/testing/node.rs | 212 +++++++++--------- 1 file changed, 106 insertions(+), 106 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs b/apps/src/lib/node/ledger/shell/testing/node.rs index 9b66b750c2..810d2b0d1b 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -59,6 +59,112 @@ use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ use 
crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; use crate::node::ledger::storage; +/// Mock Ethereum oracle used for testing purposes. +struct MockEthOracle { + /// The inner oracle. + oracle: TestOracle, + /// The inner oracle's configuration. + config: OracleConfig, + /// The inner oracle's next block to process. + next_block_to_process: tokio::sync::RwLock, +} + +impl MockEthOracle { + /// Updates the state of the Ethereum oracle. + /// + /// This includes sending any confirmed Ethereum events to + /// the shell and updating the height of the next Ethereum + /// block to process. Upon a successfully processed block, + /// this functions returns `true`. + async fn drive(&self) -> bool { + try_process_eth_events( + &self.oracle, + &self.config, + &*self.next_block_to_process.read().await, + ) + .await + .process_new_block() + } +} + +/// Services mocking the operation of the ledger's various async tasks. +pub struct MockServices { + /// Receives transactions that are supposed to be broadcasted + /// to the network. + tx_receiver: tokio::sync::Mutex>>, + /// Mock Ethereum oracle, that processes blocks from Ethereum + /// in order to find events emitted by a transaction to vote on. + ethereum_oracle: MockEthOracle, +} + +/// Actions to be performed by the mock node, as a result +/// of driving [`MockServices`]. +pub enum MockServiceAction { + /// The ledger should broadcast new transactions. + BroadcastTxs(Vec>), + /// Progress to the next Ethereum block to process. + IncrementEthHeight, +} + +impl MockServices { + /// Drive the internal state machine of the mock node's services. + async fn drive(&self) -> Vec { + let mut actions = vec![]; + + // process new eth events + // NOTE: this may result in a deadlock, if the events + // sent to the shell exceed the capacity of the oracle's + // events channel! 
+ if self.ethereum_oracle.drive().await { + actions.push(MockServiceAction::IncrementEthHeight); + } + + // receive txs from the broadcaster + let txs = { + let mut txs = vec![]; + let mut tx_receiver = self.tx_receiver.lock().await; + + while let Some(tx) = poll_fn(|cx| match tx_receiver.poll_recv(cx) { + Poll::Pending => Poll::Ready(None), + poll => poll, + }) + .await + { + txs.push(tx); + } + + txs + }; + if !txs.is_empty() { + actions.push(MockServiceAction::BroadcastTxs(txs)); + } + + actions + } +} + +/// Controller of various mock node services. +pub struct MockServicesController { + /// Ethereum oracle controller. + pub eth_oracle: Web3Controller, + /// Handler to the Ethereum oracle sender channel. + /// + /// Bypasses the Ethereum oracle service and sends + /// events directly to the [`Shell`]. + pub eth_events: mpsc::Sender, + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, +} + +/// Service handlers to be passed to a [`Shell`], when building +/// a mock node. +pub struct MockServiceShellHandlers { + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, + /// Ethereum oracle channel handlers. + pub eth_oracle_channels: Option, +} + /// Mock services data returned by [`mock_services`]. pub struct MockServicesPackage { /// Whether to automatically drive mock services or not. @@ -126,112 +232,6 @@ pub fn mock_services(cfg: MockServicesCfg) -> MockServicesPackage { } } -/// Controller of various mock node services. -pub struct MockServicesController { - /// Ethereum oracle controller. - pub eth_oracle: Web3Controller, - /// Handler to the Ethereum oracle sender channel. - /// - /// Bypasses the Ethereum oracle service and sends - /// events directly to the [`Shell`]. - pub eth_events: mpsc::Sender, - /// Transaction broadcaster handle. - pub tx_broadcaster: mpsc::UnboundedSender>, -} - -/// Service handlers to be passed to a [`Shell`], when building -/// a mock node. 
-pub struct MockServiceShellHandlers { - /// Transaction broadcaster handle. - pub tx_broadcaster: mpsc::UnboundedSender>, - /// Ethereum oracle channel handlers. - pub eth_oracle_channels: Option, -} - -/// Services mocking the operation of the ledger's various async tasks. -pub struct MockServices { - /// Receives transactions that are supposed to be broadcasted - /// to the network. - tx_receiver: tokio::sync::Mutex>>, - /// Mock Ethereum oracle, that processes blocks from Ethereum - /// in order to find events emitted by a transaction to vote on. - ethereum_oracle: MockEthOracle, -} - -/// Actions to be performed by the mock node, as a result -/// of driving [`MockServices`]. -pub enum MockServiceAction { - /// The ledger should broadcast new transactions. - BroadcastTxs(Vec>), - /// Progress to the next Ethereum block to process. - IncrementEthHeight, -} - -impl MockServices { - /// Drive the internal state machine of the mock node's services. - async fn drive(&self) -> Vec { - let mut actions = vec![]; - - // process new eth events - // NOTE: this may result in a deadlock, if the events - // sent to the shell exceed the capacity of the oracle's - // events channel! - if self.ethereum_oracle.drive().await { - actions.push(MockServiceAction::IncrementEthHeight); - } - - // receive txs from the broadcaster - let txs = { - let mut txs = vec![]; - let mut tx_receiver = self.tx_receiver.lock().await; - - while let Some(tx) = poll_fn(|cx| match tx_receiver.poll_recv(cx) { - Poll::Pending => Poll::Ready(None), - poll => poll, - }) - .await - { - txs.push(tx); - } - - txs - }; - if !txs.is_empty() { - actions.push(MockServiceAction::BroadcastTxs(txs)); - } - - actions - } -} - -/// Mock Ethereum oracle used for testing purposes. -struct MockEthOracle { - /// The inner oracle. - oracle: TestOracle, - /// The inner oracle's configuration. - config: OracleConfig, - /// The inner oracle's next block to process. 
- next_block_to_process: tokio::sync::RwLock, -} - -impl MockEthOracle { - /// Updates the state of the Ethereum oracle. - /// - /// This includes sending any confirmed Ethereum events to - /// the shell and updating the height of the next Ethereum - /// block to process. Upon a successfully processed block, - /// this functions returns `true`. - async fn drive(&self) -> bool { - try_process_eth_events( - &self.oracle, - &self.config, - &*self.next_block_to_process.read().await, - ) - .await - .process_new_block() - } -} - /// Status of tx #[derive(Debug, Clone, PartialEq, Eq)] pub enum NodeResults { From 9c69e4b5c6b31e3606551eb3620a437f9d316de0 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 16 Oct 2023 09:20:27 +0100 Subject: [PATCH 127/161] Changelog for #1976 --- .changelog/unreleased/testing/1976-int-test-services.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/testing/1976-int-test-services.md diff --git a/.changelog/unreleased/testing/1976-int-test-services.md b/.changelog/unreleased/testing/1976-int-test-services.md new file mode 100644 index 0000000000..aaeecc3db5 --- /dev/null +++ b/.changelog/unreleased/testing/1976-int-test-services.md @@ -0,0 +1,2 @@ +- Mock ledger services in integration tests + ([\#1976](https://github.com/anoma/namada/pull/1976)) \ No newline at end of file From a77cb5ca68187dc401c84a2b919e5afa503baedc Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 16 Oct 2023 15:45:15 +0200 Subject: [PATCH 128/161] Expose replay protection methods from `WlStorage` --- .../lib/node/ledger/shell/finalize_block.rs | 4 +-- apps/src/lib/node/ledger/shell/mod.rs | 3 +- core/src/ledger/storage/wl_storage.rs | 28 +++++++++++++++++++ core/src/ledger/storage/write_log.rs | 4 +-- shared/src/ledger/protocol/mod.rs | 1 - 5 files changed, 32 insertions(+), 8 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 
b0f2bc893c..2d14e3134d 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -8,6 +8,7 @@ use namada::ledger::events::EventType; use namada::ledger::gas::{GasMetering, TxGasMeter}; use namada::ledger::parameters::storage as params_storage; use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; +use namada::ledger::storage::wl_storage::WriteLogAndStorage; use namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ledger::storage_api::token::credit_tokens; use namada::ledger::storage_api::{pgf, StorageRead, StorageWrite}; @@ -518,7 +519,6 @@ where // hash to storage to prevent // replay self.wl_storage - .write_log .write_tx_hash(wrapper.header_hash()) .expect("Error while writing tx hash to storage"); } @@ -963,12 +963,10 @@ where // corresponding wrapper transaction to avoid replay of that in the process fn allow_tx_replay(&mut self, mut wrapper_tx: Tx) { self.wl_storage - .write_log .write_tx_hash(wrapper_tx.header_hash()) .expect("Error while deleting tx hash from storage"); self.wl_storage - .write_log .delete_tx_hash(wrapper_tx.update_header(TxType::Raw).header_hash()) .expect("Error while deleting tx hash from storage"); } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index d81446baef..5315f16d8d 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -42,6 +42,7 @@ use namada::ledger::protocol::{ apply_wasm_tx, get_fee_unshielding_transaction, get_transfer_hash_from_storage, ShellParams, }; +use namada::ledger::storage::wl_storage::WriteLogAndStorage; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, TempWlStorage, WlStorage, DB, @@ -944,7 +945,6 @@ where // Write wrapper hash to tx WAL temp_wl_storage - .write_log .write_tx_hash(wrapper_hash) .map_err(|e| Error::ReplayAttempt(e.to_string()))?; @@ -962,7 
+962,6 @@ where // Write inner hash to tx WAL temp_wl_storage - .write_log .write_tx_hash(inner_tx_hash) .map_err(|e| Error::ReplayAttempt(e.to_string())) } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 4e102e766b..d18262c4f3 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -92,6 +92,12 @@ pub trait WriteLogAndStorage { /// reference to `WriteLog` when in need of both (avoids complain from the /// borrow checker) fn split_borrow(&mut self) -> (&mut WriteLog, &Storage); + + /// Write the provided tx hash to storage. + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()>; } impl WriteLogAndStorage for WlStorage @@ -117,6 +123,13 @@ where fn split_borrow(&mut self) -> (&mut WriteLog, &Storage) { (&mut self.write_log, &self.storage) } + + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.write_tx_hash(hash) + } } impl WriteLogAndStorage for TempWlStorage<'_, D, H> @@ -142,6 +155,13 @@ where fn split_borrow(&mut self) -> (&mut WriteLog, &Storage) { (&mut self.write_log, (self.storage)) } + + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.write_tx_hash(hash) + } } impl WlStorage @@ -235,6 +255,14 @@ where } Ok(new_epoch) } + + /// Delete the provided transaction's hash from storage. + pub fn delete_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.delete_tx_hash(hash) + } } /// Prefix iterator for [`WlStorage`]. 
diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 4364028171..e563374146 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -675,7 +675,7 @@ impl WriteLog { } /// Write the transaction hash - pub fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub(crate) fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { if self .replay_protection .insert(hash, ReProtStorageModification::Write) @@ -692,7 +692,7 @@ impl WriteLog { } /// Remove the transaction hash - pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub(crate) fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { match self .replay_protection .insert(hash, ReProtStorageModification::Delete) diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index b00d75ff1f..c1304bb01d 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -247,7 +247,6 @@ where // If wrapper was succesful, write inner tx hash to storage shell_params .wl_storage - .write_log_mut() .write_tx_hash(tx.update_header(TxType::Raw).header_hash()) .expect("Error while writing tx hash to storage"); changed_keys.insert(replay_protection::get_replay_protection_last_key( From 4fca2774f7b98844e9552dabfcfb8a556bec284c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Oct 2023 11:55:56 +0200 Subject: [PATCH 129/161] Changelog #1977 --- .../unreleased/improvements/1977-replay-protection-storage.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1977-replay-protection-storage.md diff --git a/.changelog/unreleased/improvements/1977-replay-protection-storage.md b/.changelog/unreleased/improvements/1977-replay-protection-storage.md new file mode 100644 index 0000000000..0686adca5f --- /dev/null +++ b/.changelog/unreleased/improvements/1977-replay-protection-storage.md @@ -0,0 +1,2 @@ +- Reduced the storage consumption of replay 
protection. + ([\#1977](https://github.com/anoma/namada/pull/1977)) \ No newline at end of file From d19e36545b36ae231abf8e86443b064af3eaad6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 07:26:38 +0200 Subject: [PATCH 130/161] evil make: skip pos_state_machine_test in CI --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 23669c8d42..dc67c3c431 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,8 @@ test-coverage: $(cargo) +$(nightly) llvm-cov --output-dir target \ --features namada/testing \ --html \ - -- --skip e2e -Z unstable-options --report-time + -- --skip e2e --skip pos_state_machine_test \ + -Z unstable-options --report-time # NOTE: `TEST_FILTER` is prepended with `e2e::`. Since filters in `cargo test` # work with a substring search, TEST_FILTER only works if it contains a string From d4fc7173c22793c1c7ea62b317827f8652a94b4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 09:06:17 +0200 Subject: [PATCH 131/161] evil bench/vps: credit source before bond tx --- benches/vps.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benches/vps.rs b/benches/vps.rs index 6efaf78e4c..ee81b13d08 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -277,7 +277,7 @@ fn vp_implicit(c: &mut Criterion) { shell.commit(); } - if bench_name == "transfer" { + if bench_name == "transfer" || bench_name == "pos" { // Transfer some tokens to the implicit address shell.execute_tx(&received_transfer); shell.wl_storage.commit_tx(); From ed55676e2057f7c147a56a26525f790e53fff5a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 12:58:12 +0200 Subject: [PATCH 132/161] evil test/e2e/slashing: fix flakiness --- tests/src/e2e/ledger_tests.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/src/e2e/ledger_tests.rs 
b/tests/src/e2e/ledger_tests.rs index 158c670945..1db027c0ab 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -3313,6 +3313,8 @@ fn double_signing_gets_slashed() -> Result<()> { .exp_regex(r"Slashing [a-z0-9]+ for Duplicate vote in epoch [0-9]+") .unwrap(); println!("\n{res}\n"); + // Wait to commit a block + validator_1.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_validator_1 = validator_1.background(); let exp_processing_epoch = Epoch::from_str(res.split(' ').last().unwrap()) @@ -3322,9 +3324,6 @@ fn double_signing_gets_slashed() -> Result<()> { + 1u64; // Query slashes - // let tx_args = ["slashes", "--node", &validator_one_rpc]; - // let client = run!(test, Bin::Client, tx_args, Some(40))?; - let mut client = run!( test, Bin::Client, From bcd7557610a0a7e106f6c732e923bdb8c3b259d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 17 Oct 2023 12:59:56 +0200 Subject: [PATCH 133/161] remove dbg prints --- proof_of_stake/src/lib.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 0fadd6c728..41ea1e70d4 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -3054,12 +3054,11 @@ where { // TODO: our method of applying slashes is not correct! This needs review - println!("FN BOND AMOUNT"); let params = read_pos_params(storage)?; // TODO: apply rewards let slashes = find_validator_slashes(storage, &bond_id.validator)?; - dbg!(&slashes); + // dbg!(&slashes); let slash_rates = slashes .iter() @@ -3068,7 +3067,7 @@ where *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); map }); - dbg!(&slash_rates); + // dbg!(&slash_rates); // Accumulate incoming redelegations slashes from source validator, if any. 
// This ensures that if there're slashes on both src validator and dest @@ -3125,13 +3124,13 @@ where *redelegation_slashes.entry(redelegation_end).or_default() += delta - slashed_delta; } - dbg!(&redelegation_slashes); + // dbg!(&redelegation_slashes); let bonds = bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); let mut total_active = token::Amount::zero(); for next in bonds.iter(storage)? { - let (bond_epoch, delta) = dbg!(next?); + let (bond_epoch, delta) = next?; if bond_epoch > epoch { continue; } @@ -3167,7 +3166,7 @@ where // } total_active += slashed_delta; } - dbg!(&total_active); + // dbg!(&total_active); // Add unbonds that are still contributing to stake let unbonds = unbond_handle(&bond_id.source, &bond_id.validator); @@ -3203,7 +3202,7 @@ where total_active += slashed_delta; } } - dbg!(&total_active); + // dbg!(&total_active); if bond_id.validator != bond_id.source { // Add outgoing redelegations that are still contributing to the source @@ -3250,7 +3249,7 @@ where total_active += slashed_delta; } } - dbg!(&total_active); + // dbg!(&total_active); // Add outgoing redelegation unbonds that are still contributing to // the source validator's stake @@ -3306,7 +3305,7 @@ where } } } - dbg!(&total_active); + // dbg!(&total_active); Ok(total_active) } From c38d82203fbaa39e55b1c80d06ff792cb7fdd959 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 07:26:38 +0200 Subject: [PATCH 134/161] make: skip pos_state_machine_test in CI --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 74100d3e1b..6c45dcb02d 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,8 @@ test-coverage: $(cargo) +$(nightly) llvm-cov --output-dir target \ --features namada/testing \ --html \ - -- --skip e2e -Z unstable-options --report-time + -- --skip e2e --skip pos_state_machine_test \ + -Z unstable-options --report-time # NOTE: `TEST_FILTER` is prepended with `e2e::`. 
Since filters in `cargo test` # work with a substring search, TEST_FILTER only works if it contains a string From efeebfd9d79001374e763f59d3b6a7bbcfd9ea9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 09:06:17 +0200 Subject: [PATCH 135/161] bench/vps: credit source before bond tx --- benches/vps.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benches/vps.rs b/benches/vps.rs index 6efaf78e4c..ee81b13d08 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -277,7 +277,7 @@ fn vp_implicit(c: &mut Criterion) { shell.commit(); } - if bench_name == "transfer" { + if bench_name == "transfer" || bench_name == "pos" { // Transfer some tokens to the implicit address shell.execute_tx(&received_transfer); shell.wl_storage.commit_tx(); From 2075e8e4162a17c3d19109c1a275869aead97bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 16 Oct 2023 12:58:12 +0200 Subject: [PATCH 136/161] test/e2e/slashing: fix flakiness --- tests/src/e2e/ledger_tests.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 98bf27bc6c..b398baec2d 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -3315,6 +3315,8 @@ fn double_signing_gets_slashed() -> Result<()> { .exp_regex(r"Slashing [a-z0-9]+ for Duplicate vote in epoch [0-9]+") .unwrap(); println!("\n{res}\n"); + // Wait to commit a block + validator_1.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_validator_1 = validator_1.background(); let exp_processing_epoch = Epoch::from_str(res.split(' ').last().unwrap()) @@ -3324,9 +3326,6 @@ fn double_signing_gets_slashed() -> Result<()> { + 1u64; // Query slashes - // let tx_args = ["slashes", "--node", &validator_one_rpc]; - // let client = run!(test, Bin::Client, tx_args, Some(40))?; - let mut client = run!( test, Bin::Client, From 
a9964d6bac7d6b66a0a2bfc96b784b78658e6e39 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 16 Oct 2023 08:05:15 +0200 Subject: [PATCH 137/161] Integrated PD controller support. --- apps/src/lib/config/genesis.rs | 14 + .../lib/node/ledger/shell/finalize_block.rs | 5 +- apps/src/lib/node/ledger/shell/init_chain.rs | 24 ++ apps/src/lib/node/ledger/shell/mod.rs | 2 + apps/src/lib/node/ledger/storage/mod.rs | 2 + {shared => core}/src/ledger/inflation.rs | 23 +- core/src/ledger/mod.rs | 1 + core/src/ledger/storage/masp_conversions.rs | 196 ++++++++++- core/src/ledger/storage/mod.rs | 4 +- core/src/types/token.rs | 184 +++++++++- genesis/e2e-tests-single-node.toml | 35 ++ scripts/generator.sh | 318 +++++++----------- shared/src/ledger/mod.rs | 1 - ...33AC0E46C83B093EAF35D1A0537CE81D282FB9.bin | Bin 16958 -> 0 bytes ...9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin | Bin 7448 -> 7448 bytes ...D76149D3088F539CF8372D404609B89B095EF7.bin | Bin 7448 -> 7448 bytes ...B062D269F657017C578484081762FB65D9D52E.bin | Bin 9184 -> 0 bytes ...FBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin | Bin 9184 -> 0 bytes ...7C98D1E5AAAA9988F26B1A47090ACCE693572F.bin | Bin 7448 -> 7448 bytes ...869989A13906D683BC96E27EF50FC037156E25.bin | Bin 9589 -> 0 bytes ...9181FA326C06FCA9A49B5A5C394C75942820E.bin} | Bin 18732 -> 15573 bytes ...CE9FF2DA066496E6664F56EB28F67D75C21911.bin | Bin 9589 -> 0 bytes ...A700BB49387329F8FD049D5F66C95B11B55ADE.bin | Bin 0 -> 22648 bytes ...4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin | Bin 0 -> 10312 bytes ...5CE2E0F12C98370D2CDFD6A75236522A4235F5.bin | Bin 24899 -> 0 bytes ...E905E9DAFDAC88A291E7F1756931C8A85441E6.bin | Bin 19839 -> 0 bytes ...8DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin | Bin 7448 -> 0 bytes ...344FFFAA6CA273027CD480AEA68DDED57D88CA.bin | Bin 7448 -> 7448 bytes ...C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin | Bin 0 -> 13799 bytes 29 files changed, 588 insertions(+), 221 deletions(-) rename {shared => core}/src/ledger/inflation.rs (92%) delete mode 100644 
test_fixtures/masp_proofs/1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin delete mode 100644 test_fixtures/masp_proofs/47AAF805508239C602AD831876B062D269F657017C578484081762FB65D9D52E.bin delete mode 100644 test_fixtures/masp_proofs/85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin delete mode 100644 test_fixtures/masp_proofs/8D37EB2E5C3BD60B88B1257DFF869989A13906D683BC96E27EF50FC037156E25.bin rename test_fixtures/masp_proofs/{DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin => 917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin} (50%) delete mode 100644 test_fixtures/masp_proofs/992543B7B7B6B9DCB328590D37CE9FF2DA066496E6664F56EB28F67D75C21911.bin create mode 100644 test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin create mode 100644 test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin delete mode 100644 test_fixtures/masp_proofs/DFDFB1EDE901241995311122C25CE2E0F12C98370D2CDFD6A75236522A4235F5.bin delete mode 100644 test_fixtures/masp_proofs/ED30921582F7DCEA42D960F73DE905E9DAFDAC88A291E7F1756931C8A85441E6.bin delete mode 100644 test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin create mode 100644 test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..eac5f5bdca 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -213,6 +213,8 @@ pub mod genesis_config { pub vp: Option, // Initial balances held by accounts defined elsewhere. 
pub balances: Option>, + // Token parameters + pub parameters: Option, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -404,6 +406,9 @@ pub mod genesis_config { implicit_accounts: &HashMap, ) -> TokenAccount { TokenAccount { + last_locked_ratio: Dec::zero(), + last_inflation: token::Amount::zero(), + parameters: config.parameters.as_ref().unwrap().to_owned(), address: Address::decode(config.address.as_ref().unwrap()).unwrap(), denom: config.denom, balances: config @@ -818,6 +823,12 @@ pub struct TokenAccount { /// Accounts' balances of this token #[derivative(PartialOrd = "ignore", Ord = "ignore")] pub balances: HashMap, + /// Token parameters + pub parameters: token::Parameters, + /// Token inflation from the last epoch (read + write for every epoch) + pub last_inflation: token::Amount, + /// Token shielded ratio from the last epoch (read + write for every epoch) + pub last_locked_ratio: Dec, } #[derive( @@ -1100,6 +1111,9 @@ pub fn genesis(num_validators: u64) -> Genesis { .into_iter() .map(|(k, v)| (k, token::Amount::from_uint(v, denom).unwrap())) .collect(), + parameters: token::Parameters::default(), + last_inflation: token::Amount::zero(), + last_locked_ratio: Dec::zero(), }) .collect(); Genesis { diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..346a3f916b 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use data_encoding::HEXUPPER; +use namada::core::ledger::inflation; use namada::core::ledger::pgf::ADDRESS as pgf_address; use namada::ledger::events::EventType; use namada::ledger::gas::{GasMetering, TxGasMeter}; @@ -11,7 +12,7 @@ use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; use namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ledger::storage_api::token::credit_tokens; use namada::ledger::storage_api::{pgf, 
StorageRead, StorageWrite}; -use namada::ledger::{inflation, protocol, replay_protection}; +use namada::ledger::{protocol, replay_protection}; use namada::proof_of_stake::{ delegator_rewards_products_handle, find_validator_by_raw_hash, read_last_block_proposer_address, read_pos_params, read_total_stake, @@ -665,6 +666,7 @@ where let pos_controller = inflation::RewardsController { locked_tokens: pos_locked_supply, total_tokens, + total_native_tokens: total_tokens, locked_ratio_target: pos_locked_ratio_target, locked_ratio_last: pos_last_staked_ratio, max_reward_rate: pos_max_inflation_rate, @@ -676,6 +678,7 @@ where let _masp_controller = inflation::RewardsController { locked_tokens: masp_locked_supply, total_tokens, + total_native_tokens: total_tokens, locked_ratio_target: masp_locked_ratio_target, locked_ratio_last: masp_locked_ratio_last, max_reward_rate: masp_max_inflation_rate, diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d6b2efe4dd..46d4222983 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -330,14 +330,38 @@ where address, denom, balances, + parameters, + last_inflation, + last_locked_ratio, } in accounts { + // Init token parameters and last inflation and caching rates + parameters.init_storage(&address, &mut self.wl_storage); + self.wl_storage + .write(&token::masp_last_inflation(&address), last_inflation) + .unwrap(); + self.wl_storage + .write( + &token::masp_last_locked_ratio(&address), + last_locked_ratio, + ) + .unwrap(); // associate a token with its denomination. 
write_denom(&mut self.wl_storage, &address, denom).unwrap(); + + let mut total_balance_for_token = token::Amount::default(); for (owner, amount) in balances { + total_balance_for_token += amount; credit_tokens(&mut self.wl_storage, &address, &owner, amount) .unwrap(); } + // Write the total amount of tokens for the ratio + self.wl_storage + .write( + &token::minted_balance_key(&address), + total_balance_for_token, + ) + .unwrap(); } } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..8230ce2a76 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1980,6 +1980,7 @@ mod test_utils { /// We test that on shell shutdown, the tx queue gets persisted in a DB, and /// on startup it is read successfully + #[cfg(feature = "testing")] #[test] fn test_tx_queue_persistence() { let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); @@ -2017,6 +2018,7 @@ mod test_utils { .storage .begin_block(BlockHash::default(), BlockHeight(1)) .expect("begin_block failed"); + token::testing::init_token_storage(&mut shell.wl_storage, 60); let keypair = gen_keypair(); // enqueue a wrapper tx let mut wrapper = diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index 659f561cce..43617879c8 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -118,6 +118,7 @@ mod tests { assert_eq!(result, None); } + #[cfg(feature = "testing")] #[test] fn test_commit_block() { let db_path = @@ -144,6 +145,7 @@ mod tests { storage.block.pred_epochs.new_epoch(BlockHeight(100)); // make wl_storage to update conversion for a new epoch let mut wl_storage = WlStorage::new(WriteLog::default(), storage); + namada::types::token::testing::init_token_storage(&mut wl_storage, 60); update_allowed_conversions(&mut wl_storage) .expect("update conversions failed"); wl_storage.commit_block().expect("commit failed"); diff 
--git a/shared/src/ledger/inflation.rs b/core/src/ledger/inflation.rs similarity index 92% rename from shared/src/ledger/inflation.rs rename to core/src/ledger/inflation.rs index da2d5ed1e1..46e2c7ba8f 100644 --- a/shared/src/ledger/inflation.rs +++ b/core/src/ledger/inflation.rs @@ -2,8 +2,7 @@ //! proof-of-stake, providing liquity to shielded asset pools, and public goods //! funding. -use namada_core::types::dec::Dec; - +use crate::types::dec::Dec; use crate::types::token; /// The domains of inflation @@ -30,6 +29,8 @@ pub struct RewardsController { pub locked_tokens: token::Amount, /// Total token supply pub total_tokens: token::Amount, + /// Total native token supply + pub total_native_tokens: token::Amount, /// PD target locked ratio pub locked_ratio_target: Dec, /// PD last locked ratio @@ -52,6 +53,7 @@ impl RewardsController { let Self { locked_tokens, total_tokens, + total_native_tokens, locked_ratio_target, locked_ratio_last, max_reward_rate, @@ -67,10 +69,12 @@ impl RewardsController { .expect("Should not fail to convert token Amount to Dec"); let total = Dec::try_from(total_tokens.raw_amount()) .expect("Should not fail to convert token Amount to Dec"); + let total_native = Dec::try_from(total_native_tokens.raw_amount()) + .expect("Should not fail to convert token Amount to Dec"); let epochs_py: Dec = epochs_per_year.into(); let locked_ratio = locked / total; - let max_inflation = total * max_reward_rate / epochs_py; + let max_inflation = total_native * max_reward_rate / epochs_py; let p_gain = p_gain_nom * max_inflation; let d_gain = d_gain_nom * max_inflation; @@ -114,9 +118,8 @@ impl RewardsController { mod test { use std::str::FromStr; - use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; - use super::*; + use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; #[test] fn test_inflation_calc_up() { @@ -131,6 +134,11 @@ mod test { NATIVE_MAX_DECIMAL_PLACES, ) .unwrap(), + total_native_tokens: token::Amount::from_uint( + 4_000, + 
NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.5").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), @@ -202,6 +210,11 @@ mod test { NATIVE_MAX_DECIMAL_PLACES, ) .unwrap(), + total_native_tokens: token::Amount::from_uint( + 1_000, + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.9").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index 890d58044d..301cf78e08 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -5,6 +5,7 @@ pub mod gas; pub mod governance; #[cfg(any(feature = "abciplus", feature = "abcipp"))] pub mod ibc; +pub mod inflation; pub mod parameters; pub mod pgf; pub mod replay_protection; diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index f5d7553516..5b195dd29d 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -8,9 +8,21 @@ use masp_primitives::convert::AllowedConversion; use masp_primitives::merkle_tree::FrozenCommitmentTree; use masp_primitives::sapling::Node; +use crate::ledger::inflation::{RewardsController, ValsToUpdate}; +use crate::ledger::parameters; +use crate::ledger::storage_api::{StorageRead, StorageWrite}; use crate::types::address::Address; +use crate::types::dec::Dec; use crate::types::storage::Epoch; use crate::types::token::MaspDenom; +use crate::types::{address, token}; + +/// Inflation is implicitly denominated by this value. The lower this figure, +/// the less precise inflation computations are. The higher this figure, the +/// larger the fixed-width types that are required to carry out inflation +/// computations. This value should be fixed constant for each asset type - here +/// we have simplified it and made it constant across asset types. 
+const PRECISION: u64 = 100; /// A representation of the conversion state #[derive(Debug, Default, BorshSerialize, BorshDeserialize)] @@ -27,6 +39,150 @@ pub struct ConversionState { >, } +/// Compute the MASP rewards by applying the PD-controller to the genesis +/// parameters and the last inflation and last locked rewards ratio values. +pub fn calculate_masp_rewards( + wl_storage: &mut super::WlStorage, + addr: &Address, +) -> crate::ledger::storage_api::Result<(u32, u32)> +where + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, +{ + let masp_addr = address::masp(); + // Query the storage for information + + //// information about the amount of tokens on the chain + let total_tokens: token::Amount = wl_storage + .read(&token::minted_balance_key(addr))? + .expect("the total supply key should be here"); + + //// information about the amount of native tokens on the chain + let total_native_tokens: token::Amount = wl_storage + .read(&token::minted_balance_key(&wl_storage.storage.native_token))? + .expect("the total supply key should be here"); + + // total staked amount in the Shielded pool + let total_token_in_masp: token::Amount = wl_storage + .read(&token::balance_key(addr, &masp_addr))? + .unwrap_or_default(); + + let epochs_per_year: u64 = wl_storage + .read(¶meters::storage::get_epochs_per_year_key())? 
+ .expect(""); + + //// Values from the last epoch + let last_inflation: token::Amount = wl_storage + .read(&token::masp_last_inflation(addr)) + .expect("failure to read last inflation") + .expect(""); + + let last_locked_ratio: Dec = wl_storage + .read(&token::masp_last_locked_ratio(addr)) + .expect("failure to read last inflation") + .expect(""); + + //// Parameters for each token + let max_reward_rate: Dec = wl_storage + .read(&token::masp_max_reward_rate(addr)) + .expect("max reward should properly decode") + .expect(""); + + let kp_gain_nom: Dec = wl_storage + .read(&token::masp_kp_gain(addr)) + .expect("kp_gain_nom reward should properly decode") + .expect(""); + + let kd_gain_nom: Dec = wl_storage + .read(&token::masp_kd_gain(addr)) + .expect("kd_gain_nom reward should properly decode") + .expect(""); + + let locked_target_ratio: Dec = wl_storage + .read(&token::masp_locked_ratio_target(addr))? + .expect(""); + + // Creating the PD controller for handing out tokens + let controller = RewardsController { + locked_tokens: total_token_in_masp, + total_tokens, + total_native_tokens, + locked_ratio_target: locked_target_ratio, + locked_ratio_last: last_locked_ratio, + max_reward_rate, + last_inflation_amount: last_inflation, + p_gain_nom: kp_gain_nom, + d_gain_nom: kd_gain_nom, + epochs_per_year, + }; + + let ValsToUpdate { + locked_ratio, + inflation, + } = RewardsController::run(controller); + + // inflation-per-token = inflation / locked tokens = n/PRECISION + // ∴ n = (inflation * PRECISION) / locked tokens + // Since we must put the notes in a compatible format with the + // note format, we must make the inflation amount discrete. 
+ let noterized_inflation = if total_token_in_masp.is_zero() { + 0u32 + } else { + crate::types::uint::Uint::try_into( + (inflation.raw_amount() * PRECISION) + / total_token_in_masp.raw_amount(), + ) + .unwrap() + }; + + tracing::debug!( + "Controller, call: total_in_masp {:?}, total_tokens {:?}, \ + total_native_tokens {:?}, locked_target_ratio {:?}, \ + last_locked_ratio {:?}, max_reward_rate {:?}, last_inflation {:?}, \ + kp_gain_nom {:?}, kd_gain_nom {:?}, epochs_per_year {:?}", + total_token_in_masp, + total_tokens, + total_native_tokens, + locked_target_ratio, + last_locked_ratio, + max_reward_rate, + last_inflation, + kp_gain_nom, + kd_gain_nom, + epochs_per_year, + ); + tracing::debug!("Please give me: {:?}", addr); + tracing::debug!("Ratio {:?}", locked_ratio); + tracing::debug!("inflation from the pd controller {:?}", inflation); + tracing::debug!("total in the masp {:?}", total_token_in_masp); + tracing::debug!("Please give me inflation: {:?}", noterized_inflation); + + // Is it fine to write the inflation rate, this is accurate, + // but we should make sure the return value's ratio matches + // this new inflation rate in 'update_allowed_conversions', + // otherwise we will have an inaccurate view of inflation + wl_storage + .write( + &token::masp_last_inflation(addr), + (total_token_in_masp / PRECISION) * u64::from(noterized_inflation), + ) + .expect("unable to encode new inflation rate (Decimal)"); + + wl_storage + .write(&token::masp_last_locked_ratio(addr), locked_ratio) + .expect("unable to encode new locked ratio (Decimal)"); + + // to make it conform with the expected output, we need to + // move it to a ratio of x/100 to match the masp_rewards + // function This may be unneeded, as we could describe it as a + // ratio of x/1 + + Ok(( + noterized_inflation, + PRECISION.try_into().expect("inflation precision too large"), + )) +} + // This is only enabled when "wasm-runtime" is on, because we're using rayon #[cfg(feature = "wasm-runtime")] /// 
Update the MASP's allowed conversions @@ -46,22 +202,22 @@ where }; use rayon::prelude::ParallelSlice; - use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; + use crate::ledger::storage_api::ResultExt; use crate::types::storage::{self, KeySeg}; - use crate::types::{address, token}; // The derived conversions will be placed in MASP address space let masp_addr = address::masp(); let key_prefix: storage::Key = masp_addr.to_db_key().into(); - let masp_rewards = address::masp_rewards(); - let mut masp_reward_keys: Vec<_> = masp_rewards.keys().collect(); + let tokens = address::tokens(); + let mut masp_reward_keys: Vec<_> = tokens.into_keys().collect(); // Put the native rewards first because other inflation computations depend // on it + let native_token = wl_storage.storage.native_token.clone(); masp_reward_keys.sort_unstable_by(|x, y| { - if (**x == address::nam()) == (**y == address::nam()) { + if (*x == native_token) == (*y == native_token) { Ordering::Equal - } else if **x == address::nam() { + } else if *x == native_token { Ordering::Less } else { Ordering::Greater @@ -75,17 +231,25 @@ where // notes clients have to use. This trick works under the assumption that // reward tokens will then be reinflated back to the current epoch. 
let reward_assets = [ - encode_asset_type(address::nam(), MaspDenom::Zero, Epoch(0)), - encode_asset_type(address::nam(), MaspDenom::One, Epoch(0)), - encode_asset_type(address::nam(), MaspDenom::Two, Epoch(0)), - encode_asset_type(address::nam(), MaspDenom::Three, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::Zero, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::One, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::Two, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::Three, Epoch(0)), ]; // Conversions from the previous to current asset for each address let mut current_convs = BTreeMap::<(Address, MaspDenom), AllowedConversion>::new(); + // Native token inflation values are always with respect to this + let mut ref_inflation = 0; // Reward all tokens according to above reward rates - for addr in masp_reward_keys { - let reward = masp_rewards[addr]; + for addr in &masp_reward_keys { + let reward = calculate_masp_rewards(wl_storage, addr) + .expect("Calculating the masp rewards should not fail"); + if *addr == native_token { + // The reference inflation is the denominator of the native token + // inflation, which is always a constant + ref_inflation = reward.1; + } // Dispense a transparent reward in parallel to the shielded rewards let addr_bal: token::Amount = wl_storage .read(&token::balance_key(addr, &masp_addr))? 
@@ -104,15 +268,13 @@ where denom, wl_storage.storage.block.epoch, ); - // Native token inflation values are always with respect to this - let ref_inflation = masp_rewards[&address::nam()].1; // Get the last rewarded amount of the native token let normed_inflation = wl_storage .storage .conversion_state .normed_inflation .get_or_insert(ref_inflation); - if *addr == address::nam() { + if *addr == native_token { // The amount that will be given of the new native token for // every amount of the native token given in the // previous epoch @@ -224,7 +386,7 @@ where // Update the MASP's transparent reward token balance to ensure that it // is sufficiently backed to redeem rewards - let reward_key = token::balance_key(&address::nam(), &masp_addr); + let reward_key = token::balance_key(&native_token, &masp_addr); let addr_bal: token::Amount = wl_storage.read(&reward_key)?.unwrap_or_default(); let new_bal = addr_bal + total_reward; @@ -249,7 +411,7 @@ where // Add purely decoding entries to the assets map. These will be // overwritten before the creation of the next commitment tree - for addr in masp_rewards.keys() { + for addr in masp_reward_keys { for denom in token::MaspDenom::iter() { // Add the decoding entry for the new asset type. An uncommited // node position is used since this is not a conversion. 
diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 81be7e48a6..868e49fbdd 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -26,7 +26,9 @@ pub use wl_storage::{ #[cfg(feature = "wasm-runtime")] pub use self::masp_conversions::update_allowed_conversions; -pub use self::masp_conversions::{encode_asset_type, ConversionState}; +pub use self::masp_conversions::{ + calculate_masp_rewards, encode_asset_type, ConversionState, +}; use super::replay_protection::is_replay_protection_key; use crate::ledger::eth_bridge::storage::bridge_pool::is_pending_transfer_key; use crate::ledger::gas::{ diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 0ee60b4326..c6fefea9c6 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -13,8 +13,9 @@ use thiserror::Error; use super::dec::POS_DECIMAL_PRECISION; use crate::ibc::applications::transfer::Amount as IbcAmount; +use crate::ledger::storage as ledger_storage; use crate::ledger::storage_api::token::read_denom; -use crate::ledger::storage_api::{self, StorageRead}; +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; use crate::types::address::{ masp, Address, DecodeError as AddressError, InternalAddress, }; @@ -814,6 +815,31 @@ pub const TX_KEY_PREFIX: &str = "tx-"; pub const CONVERSION_KEY_PREFIX: &str = "conv"; /// Key segment prefix for pinned shielded transactions pub const PIN_KEY_PREFIX: &str = "pin-"; +/// Last calculated inflation value handed out +pub const MASP_LAST_INFLATION: &str = "last_inflation"; +/// The last locked ratio +pub const MASP_LAST_LOCKED_RATIO: &str = "last_locked_ratio"; +/// The key for the nominal proportional gain of a shielded pool for a given +/// asset +pub const MASP_KP_GAIN_KEY: &str = "proptional_gain"; +/// The key for the nominal derivative gain of a shielded pool for a given asset +pub const MASP_KD_GAIN_KEY: &str = "derivative_gain"; +/// The key for the locked ratio target for a given 
asset +pub const MASP_LOCKED_RATIO_TARGET_KEY: &str = "locked_ratio_target"; +/// The key for the max reward rate for a given asset +pub const MASP_MAX_REWARD_RATE: &str = "max_reward_rate"; + +/// Gets the key for the given token address, error with the given +/// message to expect if the key is not in the address +pub fn key_of_token( + token_addr: &Address, + specific_key: &str, + expect_message: &str, +) -> Key { + Key::from(token_addr.to_db_key()) + .push(&specific_key.to_owned()) + .expect(expect_message) +} /// Obtain a storage key for user's balance. pub fn balance_key(token_addr: &Address, owner: &Address) -> Key { @@ -847,6 +873,98 @@ pub fn minted_balance_key(token_addr: &Address) -> Key { .expect("Cannot obtain a storage key") } +/// Obtain the nominal proportional key for the given token +pub fn masp_kp_gain(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_KP_GAIN_KEY, "nominal proproitonal gains") +} + +/// Obtain the nominal derivative key for the given token +pub fn masp_kd_gain(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_KD_GAIN_KEY, "nominal proproitonal gains") +} + +/// The max reward rate key for the given token +pub fn masp_max_reward_rate(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_MAX_REWARD_RATE, "max reward rate") +} + +/// Obtain the locked target ratio key for the given token +pub fn masp_locked_ratio_target(token_addr: &Address) -> Key { + key_of_token( + token_addr, + MASP_LOCKED_RATIO_TARGET_KEY, + "nominal proproitonal gains", + ) +} + +/// Token parameters for each kind of asset held on chain +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Deserialize, + Serialize, +)] +pub struct Parameters { + /// Maximum reward rate + pub max_reward_rate: Dec, + /// Shielded Pool nominal derivative gain + pub kd_gain_nom: Dec, + /// Shielded Pool nominal proportional gain for the given token + pub kp_gain_nom: 
Dec, + /// Locked ratio for the given token + pub locked_ratio_target_key: Dec, +} + +impl Parameters { + /// Initialize parameters for the token in storage during the genesis block. + pub fn init_storage( + &self, + address: &Address, + wl_storage: &mut ledger_storage::WlStorage, + ) where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, + { + let Self { + max_reward_rate: max_rate, + kd_gain_nom, + kp_gain_nom, + locked_ratio_target_key: locked_target, + } = self; + wl_storage + .write(&masp_max_reward_rate(address), max_rate) + .expect("max reward rate for the given asset must be initialized"); + wl_storage + .write(&masp_locked_ratio_target(address), locked_target) + .expect("locked ratio must be initialized"); + wl_storage + .write(&masp_kp_gain(address), kp_gain_nom) + .expect("The nominal proportional gain must be initialized"); + wl_storage + .write(&masp_kd_gain(address), kd_gain_nom) + .expect("The nominal derivative gain must be initialized"); + } +} + +impl Default for Parameters { + fn default() -> Self { + Self { + max_reward_rate: Dec::from_str("0.1").unwrap(), + kp_gain_nom: Dec::from_str("0.1").unwrap(), + kd_gain_nom: Dec::from_str("0.1").unwrap(), + locked_ratio_target_key: Dec::from_str("0.1").unwrap(), + } + } +} + /// Check if the given storage key is balance key for the given token. If it is, /// returns the owner. For minted balances, use [`is_any_minted_balance_key()`]. 
pub fn is_balance_key<'a>( @@ -914,6 +1032,24 @@ pub fn is_masp_key(key: &Key) -> bool { || key.starts_with(PIN_KEY_PREFIX))) } +/// The last locked ratio of a token +pub fn masp_last_locked_ratio(token_address: &Address) -> Key { + key_of_token( + token_address, + MASP_LAST_LOCKED_RATIO, + "cannot obtain storage key for the last locked ratio", + ) +} + +/// The last inflation of a token +pub fn masp_last_inflation(token_address: &Address) -> Key { + key_of_token( + token_address, + MASP_LAST_INFLATION, + "cannot obtain storage key for the last inflation rate", + ) +} + /// Check if the given storage key is for a minter of a unspecified token. /// If it is, returns the token. pub fn is_any_minter_key(key: &Key) -> Option<&Address> { @@ -1185,4 +1321,50 @@ pub mod testing { ) -> impl Strategy { (1..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) } + + /// init_token_storage is useful when the initialization of the network is + /// not properly made. This properly sets up the storage such that + /// inflation calculations can be ran on the token addresses. 
We assume + /// a total supply that may not be real + pub fn init_token_storage( + wl_storage: &mut ledger_storage::WlStorage, + epochs_per_year: u64, + ) where + D: 'static + + ledger_storage::DB + + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + ledger_storage::StorageHasher, + { + use crate::ledger::parameters::storage::get_epochs_per_year_key; + use crate::types::address::masp_rewards; + + let masp_rewards = masp_rewards(); + let masp_reward_keys: Vec<_> = masp_rewards.keys().collect(); + + wl_storage + .write(&get_epochs_per_year_key(), epochs_per_year) + .unwrap(); + let params = Parameters { + max_reward_rate: Dec::from_str("0.1").unwrap(), + kd_gain_nom: Dec::from_str("0.1").unwrap(), + kp_gain_nom: Dec::from_str("0.1").unwrap(), + locked_ratio_target_key: Dec::zero(), + }; + + for address in masp_reward_keys { + params.init_storage(address, wl_storage); + wl_storage + .write( + &minted_balance_key(address), + Amount::native_whole(5), // arbitrary amount + ) + .unwrap(); + wl_storage + .write(&masp_last_inflation(address), Amount::zero()) + .expect("inflation ought to be written"); + wl_storage + .write(&masp_last_locked_ratio(address), Dec::zero()) + .expect("last locked set default"); + } + } } diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 4a3c02b805..0020dd995a 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -36,6 +36,11 @@ Christel = "1000000" Daewon = "1000000" Ester = "1000000" "validator-0.public_key" = "100" +[token.NAM.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [token.BTC] address = "atest1v4ehgw36xdzryve5gsc52veeg5cnsv2yx5eygvp38qcrvd29xy6rys6p8yc5xvp4xfpy2v694wgwcp" @@ -46,6 +51,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.BTC.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = 
"0.6667" [token.ETH] address = "atest1v4ehgw36xqmr2d3nx3ryvd2xxgmrq33j8qcns33sxezrgv6zxdzrydjrxveygd2yxumrsdpsf9jc2p" @@ -56,6 +66,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.ETH.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [token.DOT] address = "atest1v4ehgw36gg6nvs2zgfpyxsfjgc65yv6pxy6nwwfsxgungdzrggeyzv35gveyxsjyxymyz335hur2jn" @@ -66,6 +81,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.DOT.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [token.Schnitzel] address = "atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt" @@ -76,6 +96,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Schnitzel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [token.Apfel] address = "atest1v4ehgw36gfryydj9g3p5zv3kg9znyd358ycnzsfcggc5gvecgc6ygs2rxv6ry3zpg4zrwdfeumqcz9" @@ -86,6 +111,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Apfel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [token.Kartoffel] address = "atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90" @@ -97,6 +127,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Kartoffel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target_key = "0.6667" [established.Albert] vp = "vp_user" diff --git a/scripts/generator.sh b/scripts/generator.sh index 3fe1792a49..c9635d498d 100755 --- a/scripts/generator.sh +++ b/scripts/generator.sh @@ -9,8 +9,10 @@ # vectors. 
NAMADA_DIR="$(pwd)" +NAMADA_BASE_DIR_FILE="$(pwd)/namada_base_dir" export NAMADA_LEDGER_LOG_PATH="$(pwd)/vectors.json" export NAMADA_TX_LOG_PATH="$(pwd)/debugs.txt" +export NAMADA_DEV=false if [ "$#" -ne 1 ]; then echo "Illegal number of parameters" @@ -19,11 +21,14 @@ elif [ "$1" = "server" ]; then sed -i 's/^epochs_per_year = 31_536_000$/epochs_per_year = 262_800/' genesis/test-vectors-single-node.toml - NAMADA_GENESIS_FILE=$(cargo run --bin namadac -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') + NAMADA_GENESIS_FILE=$(cargo run --bin namadac --package namada_apps --manifest-path Cargo.toml -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --dont-archive --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') rm genesis/test-vectors-single-node.toml NAMADA_BASE_DIR=${NAMADA_GENESIS_FILE%.toml} + echo $NAMADA_BASE_DIR > $NAMADA_BASE_DIR_FILE + + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/config.toml cp wasm/*.wasm $NAMADA_BASE_DIR/wasm/ @@ -31,8 +36,14 @@ elif [ "$1" = "server" ]; then cp $NAMADA_BASE_DIR/setup/other/wallet.toml $NAMADA_BASE_DIR/wallet.toml - cargo run --bin namadan -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/setup/validator-0/.namada/$(basename $NAMADA_BASE_DIR)/config.toml + + cargo run --bin namadan --package namada_apps --manifest-path Cargo.toml -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger elif [ "$1" = "client" ]; then + if test -f "$NAMADA_BASE_DIR_FILE"; then + NAMADA_BASE_DIR="$(cat $NAMADA_BASE_DIR_FILE)" + fi + echo > 
$NAMADA_TX_LOG_PATH echo $'[' > $NAMADA_LEDGER_LOG_PATH @@ -40,120 +51,49 @@ elif [ "$1" = "client" ]; then ALBERT_ADDRESS=$(cargo run --bin namadaw -- address find --alias albert | sed 's/^Found address Established: //') echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":{ - "Default":"'$NAMADA_DIR'/wasm_for_tests/tx_proposal_code.wasm" - }, - "voting_end_epoch":24, - "voting_start_epoch":12 -} -' > proposal_submission_valid_proposal.json - + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. 
Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 + } + }' > proposal_default.json + echo '{ - "content": { - "abstract": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors": "test@test.com", - "created": "2022-03-10T08:54:37Z", - "details": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to": "www.github.com/anoma/aip/1", - "license": "MIT", - "motivation": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires": "2", - "title": "TheTitle" - }, - "author": "'$ALBERT_ADDRESS'", - "tally_epoch": 18, - "signature": { - "Ed25519": { - "R_bytes": [ - 113, - 196, - 231, - 134, - 101, - 191, - 75, - 17, - 245, - 19, - 50, - 231, - 183, - 80, - 162, - 38, - 108, - 72, - 72, - 2, - 116, - 112, - 121, - 33, - 197, - 67, - 64, - 116, - 21, - 250, - 196, - 121 - ], - "s_bytes": [ - 87, - 163, - 134, - 87, - 42, - 156, - 121, - 211, - 189, - 19, - 255, - 5, - 23, - 178, - 143, - 39, - 118, - 249, - 37, - 53, - 121, - 136, - 59, - 103, - 190, - 91, - 121, - 95, - 46, - 54, - 168, - 9 - ] + "data":['$(od -An -tu1 -v wasm_for_tests/tx_proposal_code.wasm | tr '\n' ' ' | sed 's/\b\s\+\b/,/g')'], + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. 
Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 } - }, - "address": "'$ALBERT_ADDRESS'" -} -' > proposal_offline_proposal + }' > proposal_default_with_data.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ + "author":"'$ALBERT_ADDRESS'", + "content":{ "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", "authors":"test@test.com", "created":"2022-03-10T08:54:37Z", @@ -164,59 +104,41 @@ elif [ "$1" = "client" ]; then "requires":"2", "title":"TheTitle" }, - "grace_epoch":18, - "type":{ - "Default":null - }, - "voting_end_epoch":9, - "voting_start_epoch":3 -}' > proposal_offline_valid_proposal.json + "tally_epoch":1 + }' > proposal_offline.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. 
Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 }, - "grace_epoch":30, - "type":"ETHBridge", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > eth_governance_proposal_valid_proposal.json + "data": {"add":"'$ALBERT_ADDRESS'","remove":[]} + }' > proposal_pgf_steward_add.json - echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. 
Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":"PGFCouncil", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > pgf_governance_proposal_valid_proposal.json + # proposal_default - # proposal_submission + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- unjail-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path proposal_submission_valid_proposal.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + + cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default_with_data.json --node 127.0.0.1:27657 cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --node 127.0.0.1:27657 @@ -226,41 +148,29 @@ elif [ "$1" = "client" ]; then # proposal_offline - cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline_valid_proposal.json --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --data-path proposal_offline_proposal --vote yay --address Albert --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-token NAM --force --node 127.0.0.1:27657 - # eth_governance_proposal + PROPOSAL_OFFLINE_SIGNED=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline.json --signing-keys albert-key --offline --node 127.0.0.1:27657 | grep -o -P '(?<=Proposal serialized to:\s).*') - cargo run 
--bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.07 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path eth_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id 0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address Bertha --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --data-path $PROPOSAL_OFFLINE_SIGNED --vote yay --address Albert --offline --node 127.0.0.1:27657 # pgf_governance_proposal - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 1000" --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address Bertha --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --address Bertha --signing-keys bertha-key 
--ledger-address 127.0.0.1:27657 # non-proposal tests @@ -268,24 +178,38 @@ elif [ "$1" = "client" ]; then cargo run --bin namadac --features std -- bond --validator bertha --amount 25 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-token NAM --force --node 127.0.0.1:27657 cargo run --bin namadac --features std -- reveal-pk --public-key albert-key --gas-payer albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias validator-mult --account-keys albert-key,bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys 
albert-key,bertha-key --threshold 2 --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + # TODO works but panics cargo run --bin namadac --features std -- unbond --validator christel --amount 5 --signing-keys christel-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- withdraw --validator albert --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- init-account --alias albert-account --public-keys albert-key --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- init-account --alias account-mul --public-keys albert-key,bertha-key,christel-key --signing-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + # TODO panics, no vector produced + # cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- ibc-transfer --source bertha --receiver christel --token btc --amount 24 --channel-id channel-141 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --timeout-sec-offset 5 --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadaw -- masp add --alias 
a_spending_key --value xsktest1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu69au6gn3su5ewneas486hdccyayx32hxvt64p3d0hfuprpgcgv2q9gdx3jvxrn02f0nnp3jtdd6f5vwscfuyum083cvfv4jun75ak5sdgrm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcvedhsv --unsafe-dont-encrypt cargo run --bin namadaw -- masp add --alias b_spending_key --value xsktest1qqqqqqqqqqqqqqpagte43rsza46v55dlz8cffahv0fnr6eqacvnrkyuf9lmndgal7c2k4r7f7zu2yr5rjwr374unjjeuzrh6mquzy6grfdcnnu5clzaq2llqhr70a8yyx0p62aajqvrqjxrht3myuyypsvm725uyt5vm0fqzrzuuedtf6fala4r4nnazm9y9hq5yu6pq24arjskmpv4mdgfn3spffxxv8ugvym36kmnj45jcvvmm227vqjm5fq8882yhjsq97p7xrwqqd82s0 --unsafe-dont-encrypt @@ -296,27 +220,31 @@ elif [ "$1" = "client" ]; then cargo run --bin namadaw -- masp add --alias bb_payment_address --value patest1vqe0vyxh6wmhahwa52gthgd6edgqxfmgyv8e94jtwn55mdvpvylcyqnp59595272qrz3zxn0ysg + # TODO vector produced only when epoch boundaries not straddled cargo run --bin namadac --features std -- transfer --source albert --target aa_payment_address --token btc --amount 20 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; + # TODO fragile + until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 7 --force 
--ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - rm proposal_submission_valid_proposal.json - - rm proposal_offline_proposal - - rm proposal_offline_valid_proposal.json + rm -f proposal_default.json + + rm -f proposal_default_with_data.json - rm eth_governance_proposal_valid_proposal.json + rm -f proposal_offline.json - rm pgf_governance_proposal_valid_proposal.json + rm -f proposal_pgf_steward_add.json perl -0777 -i.original -pe 's/,\s*$//igs' $NAMADA_LEDGER_LOG_PATH diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 04b5809bc2..a1a6e93c40 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -4,7 +4,6 @@ pub mod eth_bridge; pub mod events; pub mod governance; pub mod ibc; -pub mod inflation; pub mod native_vp; pub mod pgf; pub mod pos; diff --git a/test_fixtures/masp_proofs/1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin 
b/test_fixtures/masp_proofs/1226362759E8F5050B9954234033AC0E46C83B093EAF35D1A0537CE81D282FB9.bin deleted file mode 100644 index 1764192dbaa796b80a90fc4d76da3a085cc69cb1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16958 zcmeI3RcsvFwyw?0%x*I?Gc(1EF*7r>otQCZW@ct)$IQ%*9W%3iSN7RCPghzGd*6qv zl__=q)m^2UJxBlgG*n{_U?3nM^uJ!e2QbQNW71b?A)vbDPx;vbMUk4%q(YH6Ak+su zpt*#~+w#Ewz7klDbEg|`3< zLex|-2_eUUG!ltEwq)cRXT%9(My=Z~kP0qlIH%<9MEy zg(w1CcQG+aVxch8&xf;l1j*j=W}i>CPXMf$dmz}*L2_JpWu<345D>xS`bQet?)X3*33K7S!*=<8FRS;>%{=$b zvd!U3v2<(95Xz($6nSNV?9`R)v&`j$h&B)_l*HmTJc{bUy>0BbLzXRKq!DY!MxffP zM|k#3w{j3BBL;|^N>0!YDfK8F7amQ0_;8E@m?0}+iHv-JX3W!le%J7Ie>V#+b%~ZL zK_af7p^uEqN|WhpKobif`d*&49dw~ulUcM*xxD{auD%r=JZZ-XGyz_|ZowS~P$-Ts zA2Fw7&kWYpMB)1>ug}-bDBJy|A|x7{H_Sg3K(13@c_D>f=#r!OL@)=vdea zYXJ!YVVit`t^<&PY?B<9_TUBHIx&jJ);9ijGkK=eI9a^q=s}E-=0xqqFEC{H*gIPnXR}4jW*^IZxQyjPMte`kzyWk_Pz`C}&0>9bghpSb zh6+@JsqN&!V5;O&zNi>+kZJ*w-;TV>%_2-)c zr23XGZjO5j^MHL0JR5d<>3`B*8d9S8k!v6-lvb;e0Bo1cx-v{xee8VO6tNsuRt?1T z$(wJGc63=ZD}+wt^#L0QkP(cP814|UqWr_9D@Uw8mvteV>8 z22f9RhtDqaJF{O-JUsApU6I(rCk!oRFhPg_t(+dCEfO|XTuR?~gK|8Fs!o9KJCbqD z1Xo?r3Vcd60Nj>?mO@fTqn!cRd*yb;?y#$Ny>bTF0~onDxV=41@Bqm;-?DUBXgP~h zm1HoNxh-+a6#3(-CCFqri;lz?TLVDWS?K;#yV<p`(acMlTFT_9@crFkplcJ zYx8@;#N;v5bo(VEk;0-_ej3&BCJU%1x3MqaZh z2&<#uEIz_FjoQ-zWKAba8v{iyy66pOR=Gh7B3~xWFNkP(-70E#Xti1m0ov+lD!d?N z@zkb(9r{>okymZd_Ou-hYalwKM0;j4NC0;Mj(OQ&;|`gDHJ<#7G|+Zr!Q-K>X|4pk zx5O|t8X|yeDhf4qGN-+cmg$3FM&^)u-2R6iY8j+@U#4rTNFhJK%<*Hk_Dk8&415on z;>u>0eL|y3bQWp}%WBksTP_FyFxj+AeA$B~AOhEebie4P#RVvV&F$B^{$mRJ+0jna2~f9tSx`hLcmoe%z%WgL z^kza#-yf9YyGA7X_WnG&V(_-;=$8%nx)x}LUqT}57nn*?$IBg765`%QLxTs6xe|C0U#0k``3Z*a)8T?7WsCn#n1AKNpnr$8jE|2GK0*M_ z+hj(Ti=!0{EJGjS=QjM01)lmq(X>t2u)3b)UpHDhYsVO7){xO+&>{l%57K2!MJe(=Kab`Ph+Y3+Ldg@BH#QgRK&2Oi5n=@1|~rhomm zKLq%Wu7Ge)wlwbH5 zoYqOKo<6Q6lz{`+*O;v@%?5Q?sJ4UzgrYGxz14aXLV;2TE?;N$dk{Et5s$LiOrJB? 
zPD15<2B4;si1aB%hjm4&$RW+vA&wP3NP-%!xei5VAY>pDO9GH&%sb5-bXd|?wd7oY znBvyMK3p@h()iBwkX;-uEARmC8fJ&^DT6me_iEvfj9gX>F5liVfRr6k^H_%rWyOU6 zrih$1A@3=AKKA6_CzOGG5qCa_)ymt{65Cdm@sU0x0NGDBiGU1@iSI20j-R&0Zm;AX zeFq_q?M5kjt>#7>sGwD{o077g3we2hsBZwrU)!VMrFU_j|1_q3ICAk51Iau z>HiIxbh78!zm;->jPtz@BNs0&f+I{lA%;GfxhnBzCwS`W0@yB<&3LfN^eH<%Z}{bC z0P2I5@YM@tMPsNxE-IoR6#&sCo}bYhjkbbBXE%Q8J~zs^RYPeO@1PC1ohs7g*ogpq zz)VhYFVxc#cO>x)Jn!E(#XHbzEl*7DBirEBp(AMlRi?C@^hQrwSMQb9u1zK}r9GiNApI zmjeIYGeC<%8FJax4j#o3F7~$(x6gycdL@>|+HlYY0Meqn^n1hN3xb!{)*6Bf*TN-y~D<^f4#-YGJQJaVDT zzhva~%s6e6UwFS%CFFj?b2sTQWsJH1_X7XOKQjXVO;b}9ug?yv835SqPM4m6RxH>z zdc1y=cQSGs3A{KMfL#5K@PBOT6U*dk!o*<;w581tiT7?VOWN1Cs8-6|160`~b zKc@a2>VKyGH%$%WOgMSy7afsGDuJOspTC}}wonerSBzx8grM_ReL(hig#TkxN6G8% zOh<_1FN3LfLvJT~GQ`tp{74etrY55r_fFm0{A23hq5fy;f78?p%O}9)NGS?GQ99Di z5@sQNEKL@1`De|$Nv;htToNvRNBBQBwHmNtJLM$<5_58c`e- zJ;lvfoc`bD)HdF7zoJ~Wc4R!Zke*Z-Fr~~QWdkD?6!+S!ekxh*T;j=rPP_Sc^ME*h zhm}1~+4xzQV`zlT_PH{_0>J>OY8f>7XZ#FzU<>jeQ~xu~{Cm~^O;i8f(=H$_!+-6A z`HxM#_lCblX&{hQnPO2N|4f(}*L78Hs0Kc2KFpwuQsGwi$JD?7-hZb4av2OoF?o^8 z$MLTS{Lh{)kZrWiuy3$GpPbII@(b$|xH8ArYc^k<*a*G!ITAK+8E-H}l_=4%ql=#O zRvyWiuwJPYA$8nrI!HW~BKdR+{NWo=yKZB8s|J1IaQF|IOb=LrMCF;Ab10!j<+#c! 
z42(N<;P<|MG|s0F!dp92Z}z)6-*8qdVta0w%wxIF!ZHJYeTfJ_A;+<2=$}#&H4B)MOkT?|Kmbb&yb^qthRU)oo%jc8$RAXFE@t*Jy%ctFb68%7*=x{%%Rp4`s7;b+j%M z8oSR;4=xT6F3jgctVZab@S0wu>!T!5JXZx(h{Uh(wqL?($7 z1B@2&x8HU_+VzV=aCfo_vPZ17FTwGKdnt6A1t}Ky*<8u3A(3V}K1qehg&Wq2;YEf- zB3ookAb&}I>^xFyG|Uay`P6y=?>)ZWCeV}aYaNFH1H;v7=*9$dpiU1>Q~}C+&fL8T zOvg#iq6UIL#ia#Tl`guDpSo*hdO~um*rh%#4SLZ#b!!DjF|s85P4IBtD)+prB5jL+ zC}gcBqz!A%3U3`Mh1~jm%biYP$87n5){&`LI)Anr^!tXwg1dS&YK?7S7(v&Jt!=IA zqp0GfT-cQSNqs&b-RN44&fj>)bmn#u1-ph>x|Ocl(^haJMG3vArH9jv4Q}m5!|$_g zxMDL#l4b0T#^yvYYIsr0(Ag7)Vsz?QYs+4}6v`s9umraxuSvou;PB`+C^PUMWIt~` zvtAJ*8RnOk6|74)_?*{LMOOT0HS)0`FWgW^JJj1z$|_txXiz2jMTBd|CpPE7u?#|{ zWYofPe$3q7ZtR(U%n>y@$-Uk087G|yd$GB~`SJ?!G$21L2}(kqs$q zCr)h0s=wfxdHbU#qmW0EjuXW@+%pfqU-1G%pk7Q9##PD+T>n%GUqm_I3CoSeawL)O zqq@jfmP}}DJE30_tlm_xyPIPaZyIT|WQBV~Z7&E;2ys)BR8t%1;gM9=ED()7<*Rg) zj1Rewf36bC^CNQ&S%S60TmH2@R?8XR~BpM$c&8rFQ+RiHpkM47h~v z7X0b^9h!+NN59ESZ^vk6^;;Oy`7b(~N=v)Rex z)3MaT*$W2XXVL1dY*HTFHQiVHj?R&$I^lTxwQ$jyaBLhS;3qt_+=|o7=_^q(#(+RJ zN-w@(n5{h{;nk#vN0+R%+B^HR7rnuccPht3HjZRCk_{Z?HX~&?q3=q2T=yQ?DysU7 z$lZ2WB4RpYP@&N@)HKAlXQn&lc2bgZ)2wt zeLL9NhEl>>YQ{V%O;O$h=d%t2+zI)S#GC2D1jSGHzNM&IWM0ivor`q!>o-q4sRPV- z_b@_evmCc5Oz`M&J&k@N)K{LM9*uyACoiR;Y*CmBI&4UnlttbtZJPB`b04`UYuD^= zlcC$q{k9e{?~D~D&~vn-&TH*CMvIX3ksLqU$-+c4Td=IKs znl%4R3`zC=Zv=3Fk8JxbC_4vS`_DDc;l{`+ee}xZ-z*6`13KW!ZaF#lWR5sz7uM>X zFL_zj97`BIY|9075a424Bcodu z9Dx#CTQ=#LpEy!K7n87t`TUdCFFOo3y`1b)ZP-P%lZ%D28`rnvEDpq~aw=c@%C2R? 
zo@Y9Jk}ddVu6<5u!E#1VPKb$S!4-GfsAO!!?(@edD+#iEo*uqo*ry=4hFbl8Bgr8d zd%hv2~f^I9>NLr(=MiNSDSQE4;%E$maNyY)GV^@|; zG||s5K>y7JP5WVl>< z_r|KBu|dmjdV-g_!cU3vSWtgtFJhu!th*cI#Oku=ZH~7;Xqb&%C#vZ5H$&H30JgOx zw|ron=S=;-v!8pr#r0hu1ml;lzP!0T2XZjheiwY0h*H21j~aeqb=Z2g?%9^Ebe+vl`KFC~ zSITp?n9q2SmlA>gO`x)NRV1*m8<2o=y9k!r)EJEog=>Vt;J7d68+)cW?eVn@8P zN3_;0%~YFODoe~%8kVabH2+BIuzO9{Mo#oRWtcGG=z(5J-2Kz}xZ`qp{=Uq2b|+EX?#ii9!^rkbDthu2wQ;35k-XnrNfDJI-50DWg>RS02$_k1VO<8Cdq zOgDyEH=5)+igvm$GCb+qZL(KCh!d{yyZ+&tQqW8&c^rNAeh_~%=EoIh+C8`1w?|;w+iqg8$p6{Ig(p^h8xXuCnr!=_sfC9Uhq#G0?8*yg{SX%z zU=Y+DhIk{@My4ser^QoIoJQ0NHH`Xl`X0JkCN!!5w3=xOMa9?kLSJ{#JS9Z?B98H( zbOLnibGnpI??Q$qi!8Iu0kldt7f(oA>^$FuRsY%|uuKDTEY(B9UzXsF_vAe~le3XQ z{3R`YLE_-JW$BX9!ys&~NR!1sxA=343f&IYAYJJwZzyS)4r+I+akVoj-^p2RB%KZQ zU6xhVd?jy0L;NU5B}IT0jW%qR%vtc9>#ejvRe!~hKN7Hg0*(f8*0?8=ZhS;Zl42D6N5;{a+AO>t@6Xq?RMuFa+ zI_V~NDJo2Dc|Q%h{iD-mAq1B2B=cygEcGyf>kPb^N>6fqA5UkeE=9`}a6@?b9lrHV zYI|yEr=r-5&~>k7W6vzZSrZp34&$%xeMJI|%pZ7;uFy-`&CN>CE{u$t+iapRub}cB z#~&b|Q`7Q2%i^J!K$m{rJf=B$Bu6snR2X#j;|QFwgkJXJRagQ)ptjRItr%}uLJ+|l zd_1qV5p8=8;?g;lPco1+n4>ypQfsxYWv3bC|E}pin@!D9?4^8P&K#s*R!MIv&-GN) zodbU(a5A^R@eZjwOIBm6fU4TE%oIYT_kapI`W;ZZ;d-$RbAVewDpxRNKP{g`@M#JT za=mnE`RvGyXT~G+s!E6gDc5U6dc*yi9qvSM0M(SA#0N|bxxP1$Nj_Bp9OqbE?ogFb zk6JPiJI6}=h_aBr34yI=cr{~Qq+2xd2Vmh;3nRG3pk#3qJ3~}%Rczxd!8(lMhIO`o zp0)?!N0WskdI~sHpXLDp$*mFwNjGr8I8%BYZ?I^hX9#4`xU}4Lml5V-Ml28wu(qn! 
zQ+KC}!CVx^LHDsbW5%Jfe1{oFSbT0~^rQF53x)0_0NmnkB61RzK5=I zqF2Jyrlqrf9bBk05X~mY+wofVUyD@OYfEKw6`ndS5{b-%N!4$LiB1jI z#B4m4YN$8pUqm)_X!p4a$a3o;<3bV1rq=3A=_kup<^%HSjxYx*Kz(ytAFx9IzkZLwAIh;*;O&F(b32X^wM7e9}wViI8(X?%$vfub5WP zPD*Rt&`un>in{4{U5rTRZ@HV;EYU} zts-zWcfuJu&HLhA3ev{PM3?4>|E8ar@{@gMw`IF{roZVrhmSMY^A0vP-JEbTxnAh z`P{*hP)nE?2xBac^i)KQiK9z_&>TQJz|gfHF%LQvgJV=6x^m63J|#=VExeD@RXfd6 z#1Vv`Ib=4etWx}gU;(I0D3A(-+_IIvcZ4(db(x%x@8fuJ>l!t6D4x1)Y&hmL*ucq& zCx?Kbf1o=?rCdov=Pw^V+SlgP)>)TKZ^D8-qEGX;_KUfO_bJ3kzbZ-5qhibve_`Zj zQQ`BG?WtHZK+e@;w~TBp1ff?GH|eYPs%2z#nhQ1_&WJ_Wpp@COF5UOfgEU`w z%($`r;#!NV2p6$iVVyDJ8ar*XtlD}pm6*&O9cl)lJFYj<3*7lcv}6DsAdVMtWhjis zS#7)TZO2T}3(ckN^G)SN*bbKXfjD8d`o$rIcCQZ5l-_(GrpTk&49J9JNbR^+r)P=&9|Lx#eCMu@{ct}w8S{$8;T0LU*QyRgoZr>`TnUpkH+ySrdH&$n3-f1 zl`(VrnARPoDBt5*gT{z0l zmY5w(=Y;HJp@yQLgfCgKm)a{XA)RK!R6#`zEjW+KW58dJ#-1BFF36`_5`q?cdwjw- z(U*_8eMgrRg*5^fa5689W4si|7C8+SfV%2e9LP}6T6}3`AiOvHdQw;-!k0pL1CAjOPV={)Dde`wX(>2YX;jIXWCb{Ny)gV-A9kx94&`?Gi;7;q6& zRT^0IL;>vFE9Nrl6ohKpfbb~b$pzc08G}St9{ogG?oJ0%mC>mfqq1@=LhrXBW|g2O zoCg}3ML+j-bu(<^x&Y|!8Y%y-e=^6s2oYYRy#?s`J)i~h=H`k}0nrgPJXeyUYqGO_ z--hg_#hHzM0e<1@2a>)%ot=NQ1gQ{^t4VtkJba;8rwYg2+?;g=PIiKY;tL5zu2>4G zZLRaDTRM0!d+hW}{4vs=KbXL3@A=P4KYS*4R?Mj6k74i*h6fbCF)orS20EAGh$`+! 
zjHuh`T_-0nTKC4r_8TUoy>mYe9X4kVA|j~I^_62pq$!rwVjjh)P%X}qtgn&1+;-@j z!GsM_$-GgU`817af3hMZk9+Iln7LZq z)G(@Rw6m2*69<`*el(|f*7}=!VSq$S3If;ldh%4=1IEAY^%IO5ng%{&N2 z!b-QUh_&N4{r85FM3}jj=HX(?+iU;XF9ik1Z+?Ldp}xumZ_d|VoQDcr{JusaP#nkUpaWIS9Ee8)(nF}{miFl zrJ{ERaqmXOot^8h_e6$PHRj$CvHLI*@6S$>&U zsuL#p80dY#nz|?f-3NicLV`*ZN;tfh2}+Wib+94>Z&c&$b(ChzU)#c`li=feKL-0D z2&N8WMN>{AGZkYww#lf_qPQN9l7wqQ7zSY1M@~bA`&XS%eu=h>-aWCVwqh6B;$f+@UL_-*Aa_#;^3MD`=_Ee!RDy?23O3X zp@lr2R>j*#w47m`i|~&s9B5esk(N}j8cPm&H+wvRtZ6BN zhi|szE9T z)dWd%a1DI4@JjV4AV89|u$lbnZXqP2IqU!feO{m%J-U??TM0UgC%KC}_Xxf#h>9pU zii>fpTrLe6*b@mxG<7=65b5V(aqeq<;98N!8KhBW19VMK0BLYOiMF3wj>Vov%G9aL z$_mG^?J6xO^|mD;9pCBe8?IiuR%CDK3||h}lw3hnfA$XJwz{N9OIF-T=V}XI;3en* zm1{t7e-_!G?&K;#cgOi{b7@d8dy#QZ)&e!FHVFd5JLo?&)K1uLV_ zvn#m*kUxE|k-&9>4yC6NHJ__FU%{DSLS8DopVn6g1QJ!nyFk54_5Fj@K5Slo58s%L z7cn)P;91G;ml9S*Cr+NEZ@&9|o;v33JLupyr5Ebsj}8vsHcaS|ck{93ZR&&*ObBYT z`B_-q$oo*F=P?xb}D|PoPix9Hcp?UOj*hIs*DBRjzxu9R+`MXndE{gW>M{P zB&#Q%B9>#;utv7m)L5eo0}J}CG;EyQcnS4`U$-D`t$IS#Fo;M8aA<`**FFmIh~a3zAMjW829z|yorZS` z=LMA{Q~~rmdJ;d;X~RC&4E4Y(8?);g!~SOH>!M`jiff~G+<~+h-X#?R#$zM0Z_jLf-IKZOf@ zv8iH&E!3}y;P2kEUlqY$pk*o95o+sy!j3?)2rzLiDn_ld*Lk;u- zAo>W*4+W*yW8ST5ntdnZX%9pYUkB&FaY^gtM?x5Sma|t7^a3E^HG97fO%V;Q3GW=8 z7Q-V7YOwL(c2Xh6Jv}Z>+AB4)YZLSWAiZ4^lX^sH^9rQ&gXP(NuV|+^k zbjJWR=d*tnB?Eu1Sg7AKIHb>%y>pD=gV38zfmhBe^c}z4=K(Ob=%{M{GsRD$3}sZ< z`13s)&J$p$1=4PqDhg)QZNeU(xii-KDhxNL6T5xkfZ&S2@@+#p5nc1D>+w{YLX>>! 
z&(-(_=Uz8#)G0G3uXFL2P6s|XkLbR8wo}Gyo%@r%Hd=q(I=pa6vcnOgR&z_bEBNqu zrEU7CxQ2`KTrvb^`>S8#c{R3!F-PAjJ(>y-n$8UaSCF`*SYvzvFG=R}eVh&=w&UC* z2OqS8afQq3o2A^)AHcBKI0{XAQo);6Abp0KC3t+&_G*<%7>tZW5tFZNZV!N%e>k!_DUBk>tMM;|yBYaCd)5W;LY=eH1w9dVaAr9_cIbU*CL4 zTofue!39yve(U!}l`*Mh&KFjp57vqJd9MNH!5^T^@Nz^P*J+{qUg%%B(7?TX9gWDl z-T`_a2xKz$#1sDxiW$g95=pNmb!S~*pq(l-ri>;=)e>AUV-)|RwpH`XqN!aOXsj6EXANhRAs!h94?&m`>x|&9` zg73G;Mu5c<<7(^@(ZS6rK2ivl2=1-#Y$9a<8KL{`4OVh4?lJn{9oE32)hTjhl?czs Ya}+Rcru$9usD;;K>Tn9_0h8n$G(5oe8~^|S delta 1002 zcmVDesq;T?(}6;GJ0A&$?ljj6iL3u1;1rj(E|Vaq^z0{Yb<2 zRZ!sAVzYl1B?EteB%bahyAOV=K1~c8E1~ECI!?J)a8J%u;9THG77q3h|4jmv>^N2j%Tq{f-vl2<6U)wr)%%$C6n(i|cDRpzu-qM0 zR3y?U(RSHvjnB!CZ%=<&fIBH3bsIJPYuDpXGn9F^^KWwFj`$N2uyJ8Hqp*Eo zPqE?SlW%{kni&~??X?R?Z-vATv{D!V79xIHf5kRFuRXbY3pdJdB?MukdLyRJ;yRvc z@SaXYTfLWC=OfDJm@qJ^*2=KgNuzpdpLcbkp@;K!>KzuC;ERJVLen38-!9EFSW>oF zcHl!D$8EyqzFH8zu{N6Y6Lf4@Wy;AX4>C;Qlr(>v(XP4SduzysR)O1wbU>SZHrtR% zSs~B4#Qy)6GfvDU+)|2r5Dcu)^oK65j^#n@Chn%L;b+IOCaZlK1uv3if(18xD2nb~ zn2}q?o8@$xaOeKPY$Im_WRx=GsGjQ-^0WyY#P`ri7T($Z<>ih8-R4n7mDwylJu0$e zefoduiW)R5L6MW3<&}?a$Q+2nOu=r8RM05VZ8uxi`JeD_T3=*ag?~{TU$81v$yC`3 z_&`GoX`d_=Nbne7Qeet$jj%wypfP)Bm5$`dTrla5Pj#&dauc8u6ZNo_4&Yo0yi6HhnVs{%c(n1h+DRk+HvcP}S~ YSVrNE4_G+MjtEdT%j diff --git a/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin b/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin index c702a4e9b7d40dc5649b26874ffb844890a78506..1fbae64602829c00c5f1da4fa116e6a3dc28caa9 100644 GIT binary patch delta 1002 zcmVe6>1H+jGSqT`PNLuuCrGV^a3DLVLmyV&#ydNPhCsu zM2L~U&v{E|(5ctv@ZIaruv#;-YZLSWARl*%nEx%fG9+jZOQ2|RQBwB1w6)HeI?t;v zi!}V6E3-~B&4&sKlc=-Npnv%n*-s#fNnN@SBwzXwTAbL@YsCZpj>}s zmMD4_iHoL7Mcp^PB2n~iKxrCIR7G&rsxv^O=upSz%T<4y!GEG5tS}vI)@n`s75lT@ zp&JxP75=|#uG63X04BaXv{mi?1;PP8KK+U0lLR>b=BMk2NY?LUSN)8bMY;Owi>|UJ z8}_M>b7b`U5b=7q=sLOYsQbV36V|>mXqe8L5J`Fd{(Fm)WHqA~m`yt$gOcP@-F6~* z@DWtVao2ys$941zZ7A0@5_=@>=%xs$(@N+NmL>KNLuP5#r2RsUXX 
z%jE3-*wdFoiZ0R`TXOkj%_!<3k#;^Q@tye^^e0mW@rB+UmV&V1I2&j zg4u9pG$u(HmsxRui^s3DqlE>ieCZ(bQ21X819yLtK*zbY2}HsAUE?=XBWALdePvmZ z;0OsMmb3?Ht!E-r33Iya-{6K8^$>owT3iss!|0~Zxyyu6f@Jnw21t^=ftP<6jTJ&jhU{2+t#bE=2NVa@6i3n65c(VD zfI)W2o>X~<1Kter1^D7~_hxv{Zj1nk4VgJPpyk6JZKPUH7E5v(JxOB)V=rYI@NQ?- z5K5>zTt^&&x$^B+HWZC8VOPbN~PV delta 1002 zcmVC6}N zEapxwnp>)jr-~G`oxDz)P23KuyUb>@7<^x+wFlZgI=DKb5ZoBe-?^*KPINBxHY#X7 zp;w_2IJQ(Ft{4m%|MHQ?DXS;7IPNC*y^x^#r;0FU%M*WIJ`X+{Z8Ozx&q)yJU_18{Wl=msyXXpnXJLB3Z30&a9r{+2B ztHyP7Ny%L}Q_J*pVnNb$pE4Z0P_Y3$Ozr8%Fl&a6!3e{W#jO-z5aiDjsiF9y+r^GL z6*9{Vvj%^=wcEdc(@e6`iZe4r&Cu1hb4vhH7-oi=n-=(pX~>np3k)z#f$&wqN5pP5 z0y>}|BIXhj0ZSl;_bUoNnw-YbEiE06}ct9mmI+uHW8Qwll`g#oAqhmWBGGgPt4qxG_ug z#UlJBzkh(*xYNIxLz{p&hhC}IdJ&xI5Ju-}qU=OJUJL=Cy2@8`M&ZpC;-H@$_C&K3N-6R+R_;TlVKQPv3Q zV~HrDDpb+!#wQGUh#0>`d}o%EDxs7^{(#NOEK@&>AELtPs%KBfH$AmjcXGm}!`?at zN{}O?aZ1h@UEN%yVTqrn-*;71H6keT)z5!VCV7sI8+y{U;b5u!oGYL4%P-#Gx_t7* z4?N(b;2$b0PPmW50-tH}ET3r`i{JiOGv8Em7QrIpgV6JehH;>Md2OYK9RnjiXxiV^ zqG1nCR?yxruNd?aGiPb$HqZ@=0&_FjDqCc^=17l$0KgbHz2#E0S7n{{oGt9;S-rB9X?z5e?{VCIH Y`ypZ$xg1bux4+M9IV8Pwp0|W_f!MPCJ=H|Vd zs+p>(x-&Ij-qg#g+Gn40YE`Y;Uw(V7{~rhd0Py}?KLhyZA~<*I*JGQ~0Df0N#8yGN z&mA*VRaNH%U4;x7y5<@70<0oXe&!Z9(?6;RJx8l{R>hzZbY)g$$EpW9(rpuwr|Y9$ zOw(Ce7cxI7+L9w)`H(N^d!Lwd6%4BXEbzZ6^0Uy-0J5Cfzz>hYf6nxtXSky*Da_=9IuD+YbS23&*`J_*^ zY2jgmVw6Fog0*N}dE+0Pt)08;Msjtmr7+Msim0#?3nV!9ua0Ig~S4rx=;B^6Mrb(DxQu>>H@ft zW#`t=u{F0Z9byx9xK^pK2JM{d0M(h-=)4(jWpFJGGa#?VEmF;ANe?IPPnV}SE!-wv z1!%c*LJq+bw{~cT6rQ!IAjqcgp_Hx_*GU4e=V6L??3JiwpnwA_2muUUk-QmkKGX#c z2|f`86ppx=e_rdy7@Ij=?)qanSM3P{{0Z--0nt~A;|GY~%cv`na*#mv^1?g_%maoq zl`%DA1xGxX33VJ_)P`)BKO+=PdiwEhZ-PYnE_6 zexz16UbO1$L4}dzD+?Bvy_iXyliKT+K@nOiApS6;yqQ+Zb1n{2OG5QLVGC z=afpNZM=Kl^5eSg1~oFk_Knn8MBFXAtCR)_cpldJMaoI$1Yc>Bx2knX;qFb$RO&({oQbc7(FRwyTjXqfWRXoDl(QCl(D$GpDZ z)xGI0y29jHE#i03gm}LuHYw)m$+G~`5B|Y;$Z2*IA_R`$*48aEM!u=pxpkRYpAI?j@E|pC#o~z^GqF)5fTILA z@p}w4%G%oss3p7%%J%H9H~?O@ek8FJTXetjgFVL-s1ZB7H=&ZcU7FFa~EHg>T)o 
zSk7qPRJ7HXL9Xj8szj-g5e6;-aS?=Ow%&l5r`L5H)JRmW-wn`Fj1+_10@WO;ZLhd_ z#=!?T_DWn}ks|7Mk5&+y=(?HYXIzv~IZ4BxlkS&StvC`nlh3y_!5-1v_=y(JZ!NUQ zv9>`lX4DRKKm5u8+VPpgof;2Z8nC}jbzWzLGz4Eu?|7VC@Z^}$IGEOeFqd1j;jz)y z*B%Us)}B-nm9pNO4Gy{w5m*X+f5xREp@mI6ca2^@ZM*Ldo>ft-N}jS*OVatgD5q@e-~yp3wrl&KgT3d9MrKlDZDH1F)DTI&oSefd%u4 ztxEgq)@hCHNFjF$fVKDe*E9vIC5`nBYR&chw5sM;I71y*{~F*tG7uNGr5c z*yW-1H65(RP(H9&WU$z<7`*yD>)FOJci!D~K$2poaJGF^G7Y|K2Z9lJN2KNh9zxMw}6rO1W6AIf$+qx;|5qP`FekyK#+!?eA;pRkhtyG01yZ<`U|Cz$& ze(0GM`7<3F{EcCo%4pZVe#r|yMHo*2!dG%$%oWtWUHjkvVDq+a?NJH{T?(`$E2dWR z3rwM|;p_1@jiLkmrtohH|EBQ2F@>Y}sbFe;5xbkLYg1929!{r*3WFDRLKacqw6cqY z{IUN_AN&(r|LPQ8^P4`RE+sI>X_no75X`W!aGt+>HWI3H@0ZGQCh(bK4M9XC6t=MQ zAcgj8@+e*UZZrLLY+(A9#uLh?!H0mP_FV_u_qrM^NOxswN76BEb2Rv^D6+j z%)qJeqa7c2DrMXYR{HbdI3u`J&hQ777JCyY`uS0P)*W+lF?xqIb>FKR3bF0pojos<`nRML1{>TOsROSu^=N-gDHU`~>+`XN|tbChJ4(F{vTNQe^}F& zZkhKjFTO@>6$ih=HKuYI%WUWmk+hiGj9uK)flqs-h|dVu5i5twF!2W5-2Nhf)S)@f ztGcnPq9UG$tTY?}Dat+Y-fd%sEQV1X9J9+v7Vx-mu00i(}KI4tM z)}yhh2;QkXy{Pej4k>82X+F+E|E68xR`UIzjd*|l;g0~4H}GsfvGiB`oZ3<8i|7hn zp7wtJ`iBw>{5xd*>csv#4H!O?09u+O`1;)59leS-Vky~$2}Qblc$SIsP`4CCx1(ak z@5TI`X<^UkjQe6Sj{n=XvQuqV!#Bqirh-UArs=-3nXJ?=T08~6Jc!r0xt>x#8Bu&( z$n&I&SCNV_6U9Q)*t%duF=UK`?WyKcRKxmPKGE|kP+qO=E=w;b3}SdK_<|qv5xLu9|J#9 zgI#D*xOJ1rl2%XM<_$q z2g(`fp0OfM-=*Y0eTH1C)#B7K^?JizxxvK-?n+?ZaaYbQ{wTa+KbqLlDOG>Ytk%sp zA5Oq>zs~ZU89mL8DNdmFpc&zTr;l%fyWVJIm1^4zt>23|8V8IfwbvJzIyVkok|f7y zn?7EXc4S|}+(lDAyK5{nX5Z2VJ|S3?Tj{esTdr;9Isr9RWEgz_KI%BPWwvOZ_m z42O-psxj5uR>;u`9Aiqi%VnYk?zf!}BHt1>?k5mf;L)$$!Ru_l=Xfes?jR)HEn=)E zX}^Olen(Z#$@%Koi?GN_Sb!N*o3B$ z+rF_*^o5DiV~;Y{Bxd0t&-&=bC89bBwT_oT+j+$v+Scs3Hl0I9c%;#A*BM(Rlq4Z( z`~7>nFnRx({w2tis~$b5wDmfKzmFq_AC%MuTQXMOcSXw!VGb9Fe9bWT6f{u#EuVU5 zoAZjG*dm$Of#emOl$S~lwOoyi?5q5QaW$!##XIAfXL6$}K|>(kCY6J5zvIsc_g`uI zF_alz5UpaINj_hgjP%1{udPSTeiN;Yw93|EO5eXEJuAc!>l{KV|B)QXD3AL<&}s9w z!}eBwDlZ&maJ?&Mf1QtF#*8;}_a(htbws0upPD=aZRCAzW~QnTBMmbz*VicF!+EiQ zGt7?_#1c>#se`PQNU~fC%441kr`yJ%e2&j{n@mbSPAhX@jy2aT+vSh 
zV?Jw_1{e@>R#%3Cr_yM4E(m`{_>0byt|y~N{wmxUx^k_tq^O8vE(%-373H{C0%hV& zD5uE;68`dHHl?Q6yX*Sn-9?72IvdAbC9-q>Xj!W?=_I6}94!o#!_f)gIRonC)3| zDxK3G1*3+HG_SlN7wC$^qmX6^aGwc-XP}Q{vNJmmAt1Y8F1{+%F!yIc2|&W1>kh8! zKNb5NpeRQFavQoQKYcwhl`>z+_fR38PKiO?!Vs9>MfD_+!F|EGOTH~VEixOx3k`&Ewb4*j9UI-oCa?*!iD7JiAx`?HEL*TD zhyleKk4|;$$tzHjUw_Gjy>fDX8@J4kl?!8nGvVBs*s!gO9*n}eoXyL-O+Oe_%f^K_ z;-#s!PifD5uQ)v;C}5oFG9w^w9VVJt_r)XWMMhbsrI8I0W!ChOV<2u&j7@T!X!W7z z#g=7uj-~_TmV>PRZG_SFwgbc#L9!gx^ue1O&obQ$T&cF30Bw>v z+E+N5$48SFS91bv36G6swnZ0WT~Yg>O*sdb5=8V_y`ou@c|JVHEYr&qUeU6ZxI`bw zo<&b*GUJ?X~SoC?*O?EX$%G~ zv{33yA+|1_JV~v-Li7FA?ljZRiY5ls#X;`LQ(bt2k)sg8b2Zd&{^m%@btE?}*4PaB z$ZYzq^z3Q}9(1KaxNI#0oNjjb(h%OPYCPShd_5>h%~RpvS@LpBj<3>36e_3Cxst4g z&I&vNty!_$%3~H0fcefes*la(J~G7l4rLL#^tyzIKwA!rMdP?>mRvA`o2dH&0=>J1 zkz4Nsz0MP{@ewhMdiCuvktpo3!apEH+N97|rwxj9g3D?~vtC(yL^kN?R%@TA8x~01 zubq7$6c`UKR{gAh0HRkG?%_SIcgE!m5~`jTc>OT6q?f6Ay!{B?$IOP6l6GCD)sv3z zI>#}8h}`e%kL{^2sB+6ou}y~fpm(Be&RAqJ{MhWDfq$D3F6h)3VYEoNr7swg=qQ%z zhEb6}jU5J6bX;`~2)R3;_#jf7Ax`9V3^S8d3pR}idkJ(s8~vg`>-z1d*u{hjtF_Q%94{_ycE$0ne?;~ zc7UUUzW(t=D|@u(({*Wx2c%i&VAWp9qNZwjLs>%7KgaK?iL zdxea)#t#=p8|=A#p8Fc5@b=EVee@3-?VvsdHfhsd^#X$x_24>P3Ip0e1u;p&`^wT} z2Hl%GblrWy*RJBMBEv9jIKtu$+ z&)e38ke;bjr)%Zx1pjIIukdZ^9ekOJcwvV^WwY69h<8h+6}*E^Js8w2d~XhXL3hks8o z_TfY77EF8pWW-}(-{}g*s-+znJCG%vuwA!NYnq`f)!Jc$Vvpbh6pK-hUvaj38@I+( z4t57bh{t>fw$%YY6Qnu|_|HzWr<1pf#$h5Wl48-zS}u69mN^BkUDjja#E#WwOV{>h z*$M>NCfbL`4G}s>+|ir8P_q%S{HVc5mPkc_=}%DNKol?Tx>KdQT5EnB6pyBrXt_9o zQqsD$kH8JY?dFfwfdDz!lL|7iRXiLd@9wjlJA}WiMH@WOZSjtWir}YEf1+%p6kwr< kjJgN7vBQF=y_$zn4nhA}-TyVj{qkLZeg9%T+FvjK0jR0%X#fBK diff --git a/test_fixtures/masp_proofs/85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin b/test_fixtures/masp_proofs/85BDAF3AC6C282F8C767109FBDFBD6BDE6A1BF0BAE7D72FDACCC888A32094C72.bin deleted file mode 100644 index d4245c57e2ed3b88764874479d31af8ab8ba2edd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9184 
zcmeI2RZtwvmdCLm88o;g3=9_BU4y&31a~I{3$B9`JV+P@3vNSj4+IGi7(#FccMTAH zzwEuK+J~+Bc5CE^-kw28`fGvz&LY1y^~;b+DFfeNw2s88$4Yjz zobkO>hObcFqZWH{W{`+-@M>7Ii{LozV&C6Z%=&2`g9ss* zY4GpNk9i)elpf-NpZG$hMggpfNj zoe4p9^GuIIPROKI>#@mpnn>n{ejiKMu;;)Ot@bHRjTYpb(uJ;1Q}M{uOf$^h;c`0--T$zq|_11_+N`0C@MAQ`S70Me<^*W{|;g<9At+btgMk z+g9Auf(#VVx*_0JBml_T#svC(a1a&QO!q8q0XSfwpu1)s5t{&&F9`Im-npR zk{es6o{b%7!Ny9LJ^g5LGlJxSQu62U^lvgdy>eJ0^TiZtaMiV(M!qvCfL2Djduyi) z5Jrn&M?lJDa#mjvwEg(marHPh)X78U6r6I+`C3MY68Jr?eM-h%?ubNXjlZMi+vIy` zyUCyBfkM`5(L+7^!;gS$KA4T~Vn53Eo>N;zDGn6rMlidnp*^lv&5G2q5}-2#f?t8` zSzMCzZnuoD`b#hI_?AlrU3Jyo=uudf@%2G%fr`D~kIr_wiwlthH~bq{d)Rkbk1d&= z1Tz@mseh=^0|8xU>^gI8=K|e=S~bZ$X6Gzq%^5zP;Umm=3@fGf-Nl^w;fTq|JwHDxv)A~1v0p45 zl}S-(fGvW4L(TFoPC{C#qTvPp1J!%T7wwspc9L_iiHZV07yyMGKRHS%ZVz>YlCM>` zmHGna+l%Vn)6)aVUeVt4+@2O=u$1Js->KTkoY=`#rBv%Vy z{z1(8Rhu1L_o}+1sY2~1%(hkngo!qI9!7*F0$=w4vQ8q5T(v0Gf4&-IpdKj$_=IV> zft)UR`Nn{Igf1#PK=D%0t9yGj$3z%B3!R510g^G?ka4@XY|oX>oprXZ3-n9mB}um> zxVF`QhQ9%bV}0DE9ZXU)NIwSO->ml|q63CF)-osA4k_eIK=0=?{hfQ1}mp{~uErNa*y=8zDcL*?7U)r^kk3L$ld+ZSc9t ziiL7yepS{3<2R~)`+s!`Te@0+bI+YDs}SZ3Sa&Yl?w9Ay$soEFP$Jp8`QgDeMc>J? 
zy@`6bpx5kg|%9?i~yT~M#Y#NRc~_m$ebv(>I{X$p(fG`@ZH?OiXLM^bb6 z9+#WUZnR~lc^#yR5eXp`Kla|haS7!eH=@KUe%qS~ddw;6pS$0xHhq7rOD%jyg>{7ma8cZiKpRkN) zE}`ECf&v6msMQff0IL0C`>JE02!-~cF?%f7kb22Or_voqY`Q4$f?<_B&ohk@r2QI~ z>bSVDWl*(S@_ZcGze|AJNxg*~^Plwn73x6wyMT)IUz7e0eYp=059q<{l7&l-SS0+J zi7#ZeU{h3XpR*&rltWar-cA?<%^@LCq+avk{&^$*QR@FcmHK7j7{ACQv7Y4uy4&lC zO6@#C9{&FYrQXV4*`A}~;eF+1#!)oSmBn$Nm2MoSo-&(-XB$Nj z$`9;*texU;H~2lFg~${nO9w&n`C z3r@~K-0#0Z*uONj^GiY}UgQ7OJoDH(k2F)?utWaasa%CGV-W@@5y(t-Jj#KMJ88Ls^qN zoTL@IzlosD6r6DbeqrgKhVqwx%vj1$;{CAmYv|u2!N`AytluxO|11WKIvwW_UZ8X` z-!n#Z;_6kOf(*M(!Ik2}-JZ^{xd~1_%&4;)bg$Gp-erpvfqxMgF0n93_f29nRygA; ziN1(3J}fT0Ar)K-_KNXq?Zv-_DY#PDWLOZUmQ-jqsNkl}j;={uR?2^h@J~{x<@iAK zd|L;@ie+K{?Q%Vw{rf}5@x5<$qn6-IzN09OR14IQ;HC9WuH0(SPHtc*4&~I^;tSCe zPc%lfSRWM?F0(fF)DBb|?01VSLVS5O2$_;n?DULL)-dc*{B?LDo{rmwX}bBDj@JfUZ*?C&a!$7t#&X7 z=j9VUdCu=&Htq26wvpoE4sz-|@Q`!*3Tu^Ju8MR~4hNhixvO>W@@%q*Py9U05Ss-Q z+gd2cTW<&DCCF|CL`dUby#Q>*L3z;OP%rPrUA~Ar2RIW>jx$S&P~%=J+8y5~fpOj@ z^O0rR4QuRy0MAwHCN6K?z9>kk4k+ns(;s18Sbd*ifF7Tew3_X1gJ`W3^HET0>5gsclm zPQRgOENIQJIct7{Qtg5Re=mEL>{Dl?>1pJ9hCqyK6eV&8kqodjf8`51e459SV^y^Q zAF~;5XN_ha&ZmOQ11Fcs6w|OPCoxOkHx$$lM~`UKTbXPqLG;7Eu;e-yvd|;%cAS02 zyryX0O(nG@W?Z>JHQI>hdMH!vBBR6@Oess*w#lAN~8Yv}?fitXBQKN)u^tYZxDauj)wX3Nr&`rTWZm}IAb6TfL& z>y06(Z6WYTJlI(8l(nf;RovPCZXm^JyIQG_}ZbvMq2WG@Y8cD3Wibm3ofrd7?FGIWM)O8JJN1PuP&!+#jns9tj~xZ$Sy*F8g9*>za`43PzP?d z4c1a*qL008%+J>pVWwl{=b1?m+y5>ZdWx4>P4U7Yj>=WuUOY=78|xk_$L+JauLud; z=#a}EAZ%w1gE;VfiQ)A(RJQ@g4%tZC$XW#NDoTIudNc#PsA+T*Ztb4r11nW{b_}R5 zE4!et^EnT_OnfU ztY9rs5<{i_my87PArH0Fqb1dpWKva%H3M$TaSW2hxdIwp$yYy{4!7o*)?torTPjb_ zLK5Zeb7V6x!Xf&&SXt*b(G7ia*@7@ZELZR3$b}HS4)UIF9)k2M3U%Bs8*{)CggSRs zAz_KIJ9Lu5w>h+Qr5nTJeJn&D#7AddeKpuBjB?|e81nxsD?-%pCeBg^DS6$rnnN67 z>fRh<-dGsNWbD>)OoUZ*Eg&4@h1||wS0X=BnF-&S@g7c{P^F?Z5ys5OTMOXXmLB zKtN|>@QFmF=TVfIG>%sf`Mi@UdBDW@2S0kikM;|G5(9X7hfx@qR+1rd^fvoxX-yqC z-+)u&pleng=55ulkED1vNxC?afyn%Nz=!6RmJ%6(Vyh*F6R{{vdo^QNUp#Om89O3? 
zE1XQ{%B*dIv=P>A`xEIcg|#W^SG$4mPE@Lq!)$tQdg&214V(`7eUf#kphpSCYG3Ns zB83>qbGm>yWG;xD0~t2%D8EQ2oIbo~xEb4&&D4EyHc08Z`4VM{@FwfJ{J7M5kT@zV zj;Di;w)W8cIysd?j6)K4V~QeUK~=tZSs2#^fzU6TdKy z1;vtkb9~hamiRdV|6&>nz0Tbm)hZxDHRq?RbII<^k5`_A3kzA~d&7kk9ik=jVN-q? z&+{ts?aUp?Y2HsBxP=jgCpl)NNYw57pRd~$Ky+QzuDPBzd4bJ;Zn&yBp-ERhwz_-6 zOKg`L0Ibp4N;PPa+ZSBwmF@56`7hto|Geyx{w_WHcUjh|Xm@kxiP^6Vs@k`gargby z7rZ#mzPYIT*e4ZG5tWR1-sj3ci~Tl|M3CP_CMh7wJPUNXPjISIFiDR;<%`)EZ{^|{ z#~~1f+}V7Np}0(~iI^)%L~sF;*FIM)2fgUH&k{RjK*mz9t-R+f$O#XD#8?7?O---5 zQ~NHR3==h*J+xv?S2UOcBSOZlIn623Fp;)UD&3!rul#FOge7l6lQ8CIzFF7`ZAPZk=V(3sO5H^QZ-7lE5YZ+WF zSAQL*RY|Fg-i*n$b{wEdZByi()ZJKEIVh#|K7S%}bz^WPX~-`QW}!iMk%(jv+x2N0 z*yYijH~uoENsZY-z#x>;PMGtf7R|cTqc7tD&4Xzo4`;+=R9xg)>h3bsU%)f2``&*D z4@8EwB zf|4Zg(Q8m_#Y7<;Wnzf(p(ttyhW}E;O~hO2?8nJ1<57ZPj1rH^@I}LG)Tno*>^Y$D zlG;aDJXShdqAd-%ovyTsE)L3ws=jPf(L01*)=Bx znf0Mz(|+}BMF@cQ*rLx0m^EsbL}0<}m)mPq!U}I-Ui}%b$7HF{1Y67dal7%7-A8N1@KJYYv@=g3;WDIz>b3IKrxJIWvwmX1`>wKc$SaQg zr)YoW8)csM_gmXd4Shb`x@sFmJt$gOV-Sw3d(BB(yf`> zh^5}7sU@5!p4r&FZY_vd3$ulhh?G3Pn&itqKSsdifF5M?`3YXl3U=FN z{YvV%1cTCtjF%WH26XV18df&@;CQf7j4&l(DD&kk6cyofGmi2|t+O`Tt3;QKow`ad zuaETApo7yIHQysfzNZ96NFe?b!;j)BRLS8<_9!;=FekyF99PmtP}YalD5DFTtI_5- z0`^JaVE@AnNYSvIZLi+Ot+!1;%tbH(5fDAR>WnFl@A(4~&(hNA2AP*(>*e7)8le!+ zDGa=Y_jMUmjk*KhCoL>)k2Y|F%1-jfm2fh03sAW;KgF8dcn>*!5}Y-9Q1(p#$FgYw&DKwkqmHM5LnK2HhHW1pJwHvP25{fBfb0Tg3fRaKG$# N{~tXhB)~r#{{{l65zznu diff --git a/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin b/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin index 54421f95dcd807e270f4e698bf324bc902ecc86b..fc528241a153f61d1e8fa210caa13bc95cd1c670 100644 GIT binary patch delta 1002 zcmVX`P$Z74p=MaZ7Dvz4$JtiYUa?TP5 zHV|Z0vW0r36Af3MU{~BXL~+TpYZLSWAko2KdLrr~RiCgbbe_xnoT5dVY$d}z%#ehK zr4vq3JhOimB?Eu35Tx*YDvx@r#_Z|xY=sp_jaiNw7v4;ciB>ceyJagBqYH6j*$Y>7 z6Fk%gOVpu)1Na(R!ud4v7wMXetet!c6&EXwwOmR(kO~Xw#dU?1v{Bw)lbrFCBKh7} 
z%*|yAEV;n>CsHM+abb6bsbnwu;@bs^96uQ5>M07dg6)6U=dLIpb+19`K!UqW1+*rhiA; zx{Gt&!le&kUWY+u-@-#7pch0#xC?uVKFhq zlXt=%+LcXZz(Fsl4#gU$@c9xosQ=-+iG;_#P(^>-|8262A%s)jYfCBHw5YhI=)Waf zXxG@p8TRQRR+Q?(c*$rBt*)NCvJ;wMzSyS1wZVd@>%H=K5HY_&g&g(Wn)Wu`3M@Pq6`>y}y z^_zd^ZbE<9`eL2Y+Y=K9!5?i%5uo3!Fs!{D6Y2x9AL`99^v%}yr?Uok&+^7NmvTD; zz1-@zV+!&T$OcphO!t_*&4mM~95i3?J=Uh?q0mmLCzr(-_ktdOwT4xfPf z6-B1Nv3(UA-X=$*38k(Oh1j`STkl0_n%7yL=PD+Df80&$2c7idFk=SGx|*HtZ9l6B zO~C@}rl7tpj$w!bwBd}PVid6GVXJexKsCLac^*1BDY|CL+CgpO<^=Ek@f>hcks3y$ zGi-QV`-o37XUs$*4%}2OhxMTlf6k{K4H+sG_*C?)eK|(Tra+T!jYRW1p;{R-jY_#7 Y3Pcn$N)EMb8cUh_iZU5#2$SR-G*c?_y8r+H delta 1002 zcmVC0yIvoU>OD^a3D^5mp-@o@M=3gFP?J zv2v>Jd6x+9PA^=hk`)a$IMD2~YZLSWAXB1ya8Mw;&S1ZJK(&l?Ky|llD8kPZ(F&tP zm7K$4MzenwB?Eu9DPMG1%47MIn!-Pz(4fyShms(70ikAF*zRH3(4&J^>BiC#C;`WK zBTm4&oSKTV_5ZR42(GbEv-kHus7`d4^P-ezz_a_xLfW#`ej!FExu0L`+Ic zHJdII)Ff!g6o|nhjE#i9lvYR@Zl5o#0$7jsHY29?#36s|bLj&V8##sfCmf(&#K#*3 zhCO&9yt@w}?TPmu*Q7B>Qfa$05KbF6jSZIJ!@EuJ!o9-{PXQ`Y3+jFOFtG~$v*@fb zwQQ*pL`0bcN$i+q<_ye+`ia6G?7el2^VPFeg&un?go@|JNRKs)$#+nzkCqMCEk?v9 z58mx!Xv}}0{idjBZ)_D$d`%hVB{noU?&TcAI18+b<7JZS+2X>|umYM+cA05e;_wdw z@8wL>G=d**)o?M9rUJz;Ozt#)E}!*=uExs)AG|uWBL_SYN4A844u-`HeOl(my+HC> zs0}RJjkpn!ES2{V&U|aU13eEx#&t*G)oycQDPVs@_z;s0)?7-%21v?2b3T=d43!19 zz#b;>J2v%9CGLg{g>Sw?lY{f3$+tCKj!EmZIC7RBOd9`{G1y5)Q*-s?w9v8FmJW{r zX;%JYR)StWq%?$QJoWpGtpUIETkazYd7TEH1QWtarcCV4LYEtTiinZ2!IbC6^1VAV ztMGqQx7|nBehX8VS@Ra}+dfj5>L&-NpRy3a>y7=IGS(KClgG86CVAt;v-#@&rei1D zI@&O@x1z7-;`tY!wX3wYbUF;CwO>;{)k)$Td&EteLqKr)TQN^AbAHzRbW^eO`#@;wHfK2;L%3k6;|W8h3qBm z&WMD`)?D#7rL7W=*%I`aO+f6=`l5qz!s9}B8QIzCVuF&YHsq2w^iH%!PR4cmbBji? 
zM(=c~c#u=5oRO#q(GafuVDSVKg{hy;Qt@N+qr=>%=W$xQk67GGrbh)y{rZuUuz>&& YdTThf7=`82h!}GyroTO$29x9*G3x-4D}-|eWgbBPihB-ZgqtNhwFv4R39j+ z&FAND_-_V0pY;|#FS4^R#KAHJ7z%!-o|+ZT9#C+wyGNkpqY%DAE@`FHO5N{qp(8xV z>LvDu5UGV+3-p`b4N{G94dXng^QldNs;>F(NgLYm9>ntd>?KRQKNFJ*5^{V9MFoH* zsf%IPJwx!|<2%aN1;d_}H7k91qlm-B&ZET~jXOM`ob;jr&0m`P2b6qr#Dh{LSg2}o zZWapg0Vw9O<`xeD_>F~d`}d!;mm{jsh-*YX-=2Fr}tTD(v8g6 z$CmugCNLnZaYe|ZKnRtw3iI7|rs-irymNLOxX-LS-$rmhVIYs*w=y-2BM_ zsJoARh|DgLPF;OT*z)7N?E*XewY`hvTZ;M?p)xDt>&~?T6uM>)PPRMpbHIKZKDDcz4|muP%~*&6 znDp?~iYs+sfa8qyr!=d1KPUfYbqbfcc~cn^riK#&q%p64nZ#a<39yKV5y4eX=7s*g zw1S&@g&2o51%iufIdH_f$78V{A`HME=^K`sVqR2lUb-WY6Cu0a-ZaJ@1bpG6W_C*C zDD*&+p!Z;-oP)iPMyyD1mQP>BF8H)Hk=j~(-h(91uUHRwY}aciq3~_6?KS0EsZ*gB zbl#~`#gwv#ESrkDv!fmtkpJLclB$fQV!Nl6i0VGMENhpfx?M4Yk%(j48voAG6v)^Q zTfbE0vQfJU%mw`&HFRrWKEA@(+_y-65=K)b5k4!DdenXb|Iv3rjXzO$OXVKk=5c_h|w z+i36a0H{Xme^C~bw%wk36MP*av=AD3%&R7;OGGt&j$1$I%mFA{jTP4gNgaH8Xf{6o z7?KxxHD+@_&nDtoR)u6YXfgu~bxE{DP)cH0tpMc1_mooChLDyNq}~e64(hwCfX&q{(12z`zsP9Do@sb=TCyEm@1$vW)3 z^_;i_L$@p4qe&|N36RsrcwsPR3QDuKb!g$skxZU)!Yk71iasf_#mr7;q5`yS3`5^a zaKZ4yHN5o6^+o%OI_5X4kTD&K$7mr>-Fbmc@`&OWrXGmrlWbCWa^FF91Ga%LNem?j zX{;VjEL54G{*9`?`~O1}?(EUsk>R}@Wx)F|jc#M+;n}8~@J2&wqQ}@!RT89Z^LNbq zUsE{1iG8$MjJ4&JEpv37FSw5koBK0LXFB?q(~VMEkBE5x&Yf>ZoW8cL-O8^+7lN#5 zi(twDLCLUcfo|`^C??P!3jd+-9}53Jrto3RX0=hM&-bctA=whmbkrG#gmf`Md&j#h}!hhCGdp9Lq6C|Co1cg_3B&OZn-F?Nn zarowZ_OtOlj9~>9O_8!N)W4?SHBq$tr5YjVHfQmV7onTG&!=;`LT<%PWg($;6k2%o z6*v^U8K~-D3C0J`NDzoB_L2|p51syK#6KGG-)h8n9q9^@AD~5!2A?s=;71mxH2cvc z+5lN$Dav+L5-K-9DZw8$tvYqr`^6}}^2>G=oe~b3iQYs_aVErNs?rVvFv>*m#}+?E z85X_mCoG(vM#KAjfgg5e?V<4`GtS4@7~nh9v=*i)F=1)*IeDVO2I%(M;Z{zS6bun> z9h603r~%PMKF>(&%$I|uCl+^%FYA;%D>3y8kt98yd+KaiPEvp$s^uQ_fo@7XQl7@t z=k#_-ww1KXZrAcOvKdzwE0P_kuwoZ{XntXEbQ_k+>H$;SrkP!^>%ftFBjGhMhkmIK zAlUF{C(jevo>I#*(`B7p#|+o27W%Np=7(ix3saat82+LY)=l7MdD2^``uh{E$fS$G5lNQZB!6xp(i($7 zsdK3O(;G`0h6hJ$SNV0g6GWGd2M4GiDu68V%{PBJr4%22GY(W)&S# zD!CDCdW|M9QN1FX%|4d^3E(d7lXbMmJX>V4JG_k3;zUe}B 
zdDj@c%k3n+^VTfUq$<0d8HALG9Lv@1A$mPkfc#)lEQ}k)e?-T*0v7?bqb7p z9MipCsxG`R-_b7oz$P(8@H;;!DLlvm^T==REV+kVB0gz*&8kzkhuU zo&L4JKi&V&9r$%Wa~hzH!!7c;md>>X)>CZFJ=HUO^^(5l z=UkBsE(N%Sc{g_vT-M1uQduUL62}&ls@Evv#m$vTb-gbJcBYm1x&x4C2c=QrvXJ@V z4NCn-gC@8EG%d={=PN}Y^c6f8Pb)h8CV2(vAH_^;c}nEx;=t4xA9p1wa^#B2j2iB# ztjulP@-VgyVu_QISo-zUiK;C(w*!L0zg2sGu4R-y_;r$Zr1cw9UtwXAd&x^iJWWYt zAq&UL@9J~D*<;gKnKV|FH-4`his`f3bA0K0y~-s$5fS!^6ou7dpnq1BC{pC@3GOa2 zS;iF2tNf{b#Ym&ATr8G8UjghNvhp1&vGgMrv>D2iVy8|`nCYQB4m#^@#f}2!RAkWw z9+9R$SF7#UT-^pMrC47&rerg5<(Pl)Jvncc;pAbZRq7Y)X1TuGC!2~cWK4o?>loxQ zxY}`MU@goHh`d+)$lB{tY0wwKOU@Ha&6BFV$)6al3v$DQe}RC(%AV2VxA8g8Ek@eG zDLMS5(pA7pWn#ELTYt+INU;PU5~0%RW#- z6Qt8!R=_zfD})g5kK^Do!4`;e?YSQfydvjcWgBUpz{Lg(O#=4VmsrhLJo(ZBe-Ka@ zbiQqcqeb1XrA=w7ZN%E^g`q&!yI&UTA~8jhnNL820fs^fOUm}1X&w%G#I6NbdsnP4 zb`}Zgk<6v^pfFh~2}}upGiQ#-py;xo z1_!k76HiBp341Gc78K`6U;Aq}*|lBc6)PEPVHSTF?c?XAg3|OSGbvM&ajd+vMTr?( z!6Ox7rx1F!?rfJnZkGEs@zz6~h-K6+?TgZ3e3$@6@4?yz?CUwBIk#b+px%xV8x%8b zvobO!{FtIk< zu?K{c{zk#%9nNfeVo=dOUS`0GtkYu`Uiy1suicI=G|JYru^;sK8xF4>x3k(9Tm`(P z$f#*n-|K|Z0gW7#Fsq#9D(+l%GuF{D@rB*bRf({Yx~+IWNJ%Ti+!6NCGaa-+n|@ABqB75tXt>_uLQyKI+}yzzf*OBP$5#6vR4PMn zrhVfJ|iK0kpE1UqA;xCi5CZNX_A1nX(sv()_dd^_x?&oAciQmLBg+d%o*#u@`W=&T0 z(A(js)#gSUirKnBqiks}a@ZKbJ8dTen3q(IJF(${n5lXD}rRwU1KE-qWnH|lD&)O!Q zJEjKFKh2bHyHyCj;1>+=AxBnE(KM-H2Ct0UPAax>)~8PNnH<~0!cB%ck2%BjsACg0 zA|qeCRR~1%EvSxr7%@XjTF*lSd%3d(p$VN43#KYN9yqxn>|sLTIxN$7!Tq&g^I(G; zJZI!ZR!LNj)K5{QU#Vro@*_*ZgHZk^wcW6Q?=`5`GmL$B zDl88um+_9J2_btdw33I=94WUs;ya#4gAh{v$uybp- zzf>5{4Z|8hc4qG&1?UhK{25y!%<@(6Mymh~1s2Be>)MPAbzxQpc7EQODAB!H@z=-r zi4|0mdJ!~^ayCzrc!ljFM0N2>0_<6ZG=1n zGjjK1o)CvM)w{t~8h(UBU>_(rw$n68A*HC&Z?gvXb&LJES~|~M4tL^=#+%$GiZEL| z15?_9fmte6zXr&$`<0q+g6A?HCYye0&zE8KP!EdtYN?71&>pFpf`TD`ViVEJ%!$nghPx1;;@GYb1|Q9r4Xqx3V)Ow={-N9F`G zZvwMIO?AM@u4sx5d$mDlXWIGzcWJ~LV?b4%=qt*oB5>s5=ZYd5B#_c{t^ z8vucEglda?rX@ZuY!C%x^nJe{cuR~FTZL@oIr=(_$Oq*rZEpo<1PsEe=Qk+u_XU`O zOrGk}q&PDG?&}|kmUyt*vmCZ`n(KfC^B%UK#J!d!y^VyUAsFP$J?Z5e-h?fofag<Z@eX 
z$Oe`$Z>yUl+EjpS$5*|6kK^s(5pNo?o3CWWba=XR*iynWP2wFT%^N4?giLMRU+;?3 z+0M(!ugM1vAtH7tY}0 z%pSrWc7aC#oN&DXqDh$R1z4Iu{L?OR6LwIZQ=fC+jwl2 zuYGYrT2HSuIoG_LYnf8)K+PilzHQkvCKaoOshrK3?=RU@`Pa*_-Ojt4Zfw62@rRN6gmb z-FxVQDoZbK^LnGQD-{sK&_O|(BljiyzJukccX`H4%LR*&d75?f{ZZ9oVHQ_K|ApIV z)|^dz;w6;tedMu>EwclA$PdzTxCJ^-XCQvz@$F1)X%Oozf2>^W^>{&be=fo*1(7G2 zh$ppJ)U|Ky!`|##jnNE{2^VHOF^ELwo);Nw1@-bm34RB&Rhs)U%EMqn_ kYl+xzjmfN`5kvm-FGs&p0>A9n*x&d69QZHK=zk6V2Ux4QX#fBK diff --git a/test_fixtures/masp_proofs/DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin b/test_fixtures/masp_proofs/917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin similarity index 50% rename from test_fixtures/masp_proofs/DA50E59A47A7BE9BC8BFF03D9E755E2583731052033322E25250C780EE322BF4.bin rename to test_fixtures/masp_proofs/917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin index a8a8691cd4e3d5a3d412e6ac82e1b884fdb149c4..4dde0ccf5f17bae502d207612170f73df7e81ba2 100644 GIT binary patch delta 3619 zcmbtXS5On&)(s_uP69-lLI_owfJg^HKzfl60YfiRMLD~8l+2yNRt+NZ=s2V z-g~bSr1z`;{l15P?!$e!Yi7^cXV2be)~ts$`xK%GM!ZZ20002=cl-ka(nMzS(7Ro+ zD)e+339ks@d0fhsgWDl@Bl7}{+d907FrY|JRiL|+Q|4^}_Yndnp2`wFHxA6>mWOrs!t!Qb@6MC2uw-*; zXXD)Zb>dj{aFn3wys`0}b$gP3O9l9&8DR{b3l=j3<>kO@`2Tp49g#$A$1Uk3=!;jF zp6;Upe6PS=PPZ7K*Af-XIxv zN}nX^d3$gllJNFQzOF}GYKBFmFi-6p&E}<%orwY*F7dg~vq-wVj=J{@orMe16+A>+ z0k9uiY$+$f4n_07S|NOpk35b_qEQDd3|nakTOQr})D#dHf~reB(sV(^&W^RbVw3SE zrS(({6B+z|~l5$D;h$36AkR1cj_5)1t-j?N8%#033rYH2@6L&yIW3y?MK z4gZnwW#RF0Liy4Nbd}cGNW+olR*Z^(!UKY|`Kgg~dHu8XC~97UKeN=XWFQ8=ok5c%Ff@0w36VB}Uk3aQ z{ks_8{hNITdFOX39^h;1wxCbhZQFFfEN+PhdI$52QQR9@`!F>QP(KatIR^9*|1 zo9_JNd%eb|Yw$S-djkE)clB?xTI}!=r4G{*c!;9a9sJbSZBdc8;}2tFOS~}%+214_ zS3LIDnTpGK&eUmYu4FA_^2`?`Pf46jYBNgLms{(`1uRkB+;Zh;t)DfL%xTBrQLz1) zd$5nUKPTPWij1}Waafj<5B6YC_#0cYdfpXCgj45 z+ADKOxN>zVJ=4lrecgk-^vB}NR|CvLsox~PoRwci@QH3UTt_)ZACh^_ZrL#K>(_~c zoTTM4c6wE0cDCu3O;2sX^Yu61iZ2-5zcdaR>ryZ7q~A>@r~iZ)2M05lpN*a_(vzA2 
z=QAx;B<7V#-+{jsm^X4Z($aO5MZM#5usGK5B5ntXDfba`Iu0 z1b&lnU1?3|yG;3x3^N*+@DZWM--mNcdSZ zI1a3TW*y7iTg@_mHn+bcLWULVTyA(3>Af=Gs0Z~2{5P}y{~nF}Pm_+o>-V_%uJqVv zjn!8zTqN{tAgmRl&dn844<}HD6V$#5d?D_pl}`nn)^2GBNf3Ws@P!7|NxbdqNz8F0 z*+gVfuobu*E!xy}o;Q;uZo-IiYaIx+e8eE`yV)p-!7?4|>;>o2AYrUhnBbMJevK(9hoGJRCidak#^FZINM4|#b#gAz_V*T z7*#8^+R1_#Dkn%e-0x707mu1aA=TtO;zNyFq#iLyx>w&&)M4i4gAAJF3RtXrW0P0= zU&ba|-nSF+YeDzTrai-0`t*d%$x&dH-V0B?lR!;!zWy*fJd0Et9Djq#E*U<@kcAa~ zdmpNOw>P|K%uC?F8CFFmMwX?8CZ?T}Im^M8W+~ZLTg~B4dT|Esw`EjmkmdF$!2MVE z`}I44f(`JL4}tX+cU8mD>=6yG)+p(|H#N3hjbJzhDu|kdm7RiH_3EkeBwZ#+1H!i< zmFKhZt1d8yO5 z|HBI^8Ncoqx2Um$dM!+|ar>*s1Dfm}kV)-~^VsFWYVH|G752hhm;aPV@ARP-<#JaZ$~_(7fY?De#FjpO7zXC>Q~&_Vc5c?CWsLw z7Et4Ie4es-L)Z#;Q94xkN-2%%VqsU4W&7S>i?hVRm);P0>X(_cUEtYh7L178&)$yh zQqy^@OhCOO>ai!OQ-XBJV)t3si1cFi$(uUIPC3d615m3u3c1g{l74@Q_)4@8qi}WT zl020?@AWwa=^r_@k@Kk8?V&Y?V_B4NHR)(s&SRfdyfW}p#ZEoJq=zJ;pKHcJVeB-J zEq-jcP#{rX)iDFKLQUrg5Ntse)EWo#8PA;$<^}1|%Ly=FA*Q1n(gryKJILe>b50ox zqI+_x8rvVjkP-X>TVNW(A{O?DiINii_I`jEE}-k+3E62QT8{3JLMnkFIm8KNR8^;H zX(>i`RPEHrsNH^egchP zdEgvR=XhWdi)v@o@?n?Hi(WzW&F{nn^tCuU3Yz(7J`TlKTqm zNQN>O{{*-ShHuoqh2TB^I8^4{@^Wab}Zl zo%eis1@HZbWsV8Oc)t5yP4d3T4Yu=I}=e4`xX^Hy87wkC~atF4h8=22NW(%4LE^ zqgRmFT)!YUE0gqFe;~Lr*o<7YDP}u^v80~ zdqX2jy!x(g@N0TMwaUu`?!`aK3RRep);Qi&Y=`Osy`J8L;@Kxs7T2(7b?8BFmvruA z*Ir5HLncF|8N&pKt4y4F?Ei?VjPJ$YNc?R>jC(d-b|VDrUYUEL}i#H6trb zO8D&Oumw57{=XsiC*sF7`el$ zBMRmH+`?SPsm0}RQ^NdH+>rT-iRs1COyX4MBbCxF=;n9&C_hz&mDR01r^>`udU`1% zdEPO-)ZXj-z1~swbfTIgO|{y`Ay8z4NiZk<0p+TEK9juZ&o@b3Q(c0^6CoenFNUJ(GSz zLG--Ue7M?h13HwEDOn;C2eUk}K!H`K-q3CDYB47KfUwQ=Xpr<+m}fA3E%>FE-Kw4G zX+~$^?w<$ROv)={(<`@8w0&?12ddr>G=#77SX^4i8inz-n1y18CyYD+10R`XoeEq= zMofZ%GJe>!Vr;VR)N3hUMWmj58k>D+q(c22(A~rR{z1~eIKBO%S<_w3)a*wrk7HUz zGS#v-7;-JwQ3N_WmSQdXjqFAZVK#$-RC^tqh^-`?$b+IsHVKWcIK5qZO%@^lAFC$S zuild9Ah$?q8A5&*8XYO<-vz+G E02i6kBme*a delta 5328 zcmbtYWlS7gvtD#5Ru*>_x5cecq_`HBvbeMqcb7#LC@x!QaVr$JMT)z$P^>t`tx$?D zPPu*W_rAILa&PkexRadB|EN#6g 
zt8QndMp6vKI#x517)=>dHZ;T23t6G!C~0&h4~)Q>dw21J9JXMa$5IKCB7-gW_Y1Dr|7sNg{f|bMVgOU$k7eh2dpL6A@%277Yo&Xbd5R2I$XvmT zsqcQ5{qXQG27f({ft0D_t~`6CA|cOfmOi9%Zp?0h(qZshoJBP^*#61X-HI+a`ILA* z%!150E^}us-5}Nhek|Tn(qBD{#FG`DxJ5%+f*3Gh&#G2o(d@5YCaF)88U*QdyD>3l zoJLb)-H;3IG{Ongz!FIo3lH7(7og!>&6fC>ZB~Zp13UNMN7D4YO6BZ)RN$uG)OoEV zNJoI3Jg=mYk`Yn%N_rhxBKhzp*?K;MA|n`FK&h7A9#EOJdV0{VC~bUJIV3avCQrf| zQ;rK(|v-uHG}31{aONfwNo85}>FqOm5LRB`_D zan7Upvx#3s^m`52SP1_m)r~$=T_SK>-Bi(B%XDgt-tjMf-V>aD9n*fmkx5zPmM3s< zf?E>|-|$*xx@zD$lD^$78K}JBJt$Exp%@Wu@$L+p>1TZ{!Tt9fF#gjF{sK2*g$#@! z195;Wibw}ipGQphMhp?y}pmjj6yC-qb`4tiiU0t~ZQ^yZL z;?`i^6c2DXyqI}oyFdbMRPuzqTPkV3^!eud<(k>mQ_GxqvIGV&u>X^{(C2}IG?>rz zAihDb7a5$rX5irFmX|Q7m>iD+Jlohuj!Wwo`H+9b&(EUh)#W3!k+rT5xP*`}X)P>L zOM(3%Gbp$N>6aaY!D+)|G8@i^>2jL5%^jWW+-~!ui|1e;jJ%&qFXUqJ%${V|865TGKmaAh>)aVL;xANS^f>Q{|9EDnGFIEhK!2fJ~=em z885aKMG;pnxS}(8R==@iEc2%Er}su#;x4`LXtmOpcxGPBc?>8># z6ZFOeHp4AEVmkDD;K?nr2wj$E;4T2ixK4tc`m1Wt7uA;H_$aQ|P)efS-8;YU;$5@) zP{b+oGjOa_&!;2CL!HL)OVinR zEmGUM*x9InS}^l4I3s37-q9}G!G=ynO2OTHO`91cQ(xKFf`kivK!=T|x)BCiW)$WY zOMk?9Q#x?Y;`ALZU*Wjro-MvxMgP>umNTm016yuLc!I$P`;G-;FDcYq;`!tB_Rvqz zw>g?pjqiS)R8R;mtvU~k(w!mC&Py~dU* zY~t5IOPjuOt#na;R4VMySIm!p0yJ-&3mj%?(}1zIY7w_T&7zJ!gN0WakglY0pxcra zraQ<^<@t{dBEd8vdXdk*b60$pZU8K$jovGfAGV9jg{x6NJ_U7}R0T6Q&hJeGCU!*l zcG3}+z?C@kak8HZSMJxvqri2LR8%{sKU!OV1@Csmi$M*dQrP=O__UbaPi32tQ&(0M zcZcPc>URghQ0$eIA``BJe(QG_MK5CsoW2acXgvu$pV_!}ym6*=~@ z1v$@)0g1@IowOaFO|x25B#Mos%^J>V<-4rWEiS~gqOBIvVVD6&z~B`AA-Kj<8RcK- z52QF@nUTt#KH!ZZ6Wr>LD?sehoi{|9dc9RSm5Ll@6lXcFaR(uEb*ogXK$|7FvB9m_a zh0X;nC2D?y$`iY+0+o@pWmC^FH+e`Yr@}KUJ^il;>piexg=1oH=MPtVX0NN~RfP-; zxX!w$P>j=U?5kJBlG0WMA2R%QcTPJyH1ak)ImOSE|XpqjjU9O}VrJ}==ka9awa?v)Vs-_E1hE1Z zdMxnHaBhZw$YNlw?sa2VvjP@M)LUm+!c7LGdLHzu;{+P`yniNu_|gq&#F2&$JHZJ` zM#V}{&ie?T54nUEqu8^tWnGHylCG30a^a|@%UwDPtK~AB)Vy%^?uI4t>-xJid|wS3 zaYd)y!#8)!0kS$a1L@giaFZjB!r&4rDofF~hLorp3ky+%NR!+TxBffkj>6f9Y2@A9S{Yk%Tx_w$DK(6Cj=);Q0HU7Gp7<3)u zR~2u9f74sOoj%-zwp5uE#vmg66fvzaz8>L)c>?BWvEn;k`gkBH5I(PzmR1{pbmab1 
z#3Nd-<$l$m+I*$WUY+#_N8dsa>h3qbm{*R$6(U+I0l)2)Tk`vJIm4y3Ro=8IIX+)O zdjX>hC59iINC2VYw_T=H+Nzbidlo2&+H+Ei81SEV*?(VQVgGxFZ6u%3%lSPjQc~cc z0K0w3%YTwRHM4Chz9%zk&$PTJNnB^XdsmjV)A!=~xYRahJ?^jwORVB0iLIIOTY&^r zoalhc&M^%=?OiD5Ss$I#PT5-5Bf7xv&Zq7`yU*xF*hEYzUT${+#L;eI=n1gOX!Y6O zvcJR$LIF^n({FAGj8FAu9`g97cEzALTb=CwF(awJTd#oBN>UO&oO@ubc`*$c_am-a zhV}NlBI#6b?d8gHyxWr`+?IVwz4)m-attMR*%c8IB(`N3#N`8Nom`Z>NdKeC=`}yTwNM1;NY1ScAl2zUS0@GH#nHoWT!e11{M&u=lBbp zJKu`Ae}EV(*c&dkcTdcZK((lp9mfeg;$wlu^gW0a_{Z?pYHsye;b$?RwLKd#U+0vg z&?!i99Xcp3ds~2yuHhZ_rLwO=eBBRQ!@;23`^YOBJ6J1;wj2eQC@lS3&lG4NW|7{0 zzaX+ai^2S3${-?tVxNW~P!8=p*UI6u;L1!-%F&eq1Jyl6xfQQ z*^Ho|EqztU78)l%etm3W>s+3>Fzsz{xZi#dN52pXotgBx~umr1<@BfMJnZ4yv~94}&gl`Fq-S^` zf9w?F{@}}FH1UBr`HR-gR{$D;X*HFu;xkBW8Sy2tgf{PwlH1h{37BO+o9!sj;wgk( zI)|%&5O-`Momdg@ z29kz?+jAwsVR3|SX-Tb_0i<2cF^WX05&`aBDZU)TfOPjHD2V@_kBPq94q;XtMWAn) zxv21TjY-TZ0kZ2MpTvdXC8M;pW*Xz8m+UC^DV`EK((~wT53dLMsGsd`8BD4D+#c7*yES7oc*dqd;Q!2lUaGxj1a7|VO>eanXRq2sep&b5bTo#$Xz zWS^i2c)U%*RK8_XXaiOqvBf<-_@vxr!Q|sCe1IH_-IuYgAiow&q?rynBA(SGVp&&k zt?u4g;4&4iUl|JibXc}ZnO91WC!IBkRf%xWOK5$#|e2QbMLgCQ&uUt)MGf-t1+g4rMxh`jAQFNi$n+x z3z^?$oiu~ceY>3 zqvy5X{jj3rTT_YYn{4=E8E2H7KX~==2GL)xDnLhhjJrkf^F}9lr)C?ce$ZKEX0bgt zrC&jSxMQi+9)F?`BN*+apI=Tg)U}n9<~-_9jKxaK7>UtzL|3ONACW|?mhq5_3s zk!jSJX&KM#7y|2dNux28WD}AfK4YrveS9|Wf|;=(uFfyJ@K8SsAJ4;QiJP?~7w1cN z>ATdJ`EqPa4t_a_v&)M!$S2MmkG4tE!^id^o=}P7mx^3HZ!Kw&UhPKYdCci+ii3ML zjpl-Pn7_A^i?(BU8SHN!oBioxN@Y8yaq!a~VJ_gq; z%ux0DCg@(`6){eW#ERh@&@O&cLmdfz(ru`<&GU8e#DRQIw$EOel!#_laO^^>eBbXA zYhoLtajlTYv^J^8TLb3;#}QJ#YGF!CUtf|+lvxh?*uA?tzae*^=Jyd9^zx6_7}i%aaO%|Us@_*7wQ*zm zP@Z@R7@qBlMKNkO3IN&qn=-SWN}zLsNqMkijs+QR&HNUVlQ5Ql-r0M=vS_e{_425Q zchIKZ)tG4DqTZ$Il9^Q87@-S zBfbY|7blIQH5w=*3 zj!-g@SEqPVsJA!M-(r?7!97RUkdf1dG+4z<7O1AV@ibv+5?8lY0&XIW z$Vft(zeQ5``;dln+$i%za0(q`P;3o0P$nS8!o^)Z$RKfS$=K@i3^lWk%Mciik1)i74qWq!ZvO4Reoq6sMF!kE+Udo^|x@cCQXK% z+76%OhuQ|mecExNSt$%ZUN$s8Ue7nO`k3|@ORY~=W!9@Fo`+%|UwNDv**9AF+D)zL 
z6_$H=Z^t@;UCO6Vn64i9%J1SvT<6;L(p3ojNXkRJKFiec!1()L%JB&ZzCa_08@dNI#V79FdAD;CjzAhh`^^vER8&)i}#KicUL-OV diff --git a/test_fixtures/masp_proofs/992543B7B7B6B9DCB328590D37CE9FF2DA066496E6664F56EB28F67D75C21911.bin b/test_fixtures/masp_proofs/992543B7B7B6B9DCB328590D37CE9FF2DA066496E6664F56EB28F67D75C21911.bin deleted file mode 100644 index 96669ba726b230e46b99331417957a5dcae14d83..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9589 zcmeI2Wl&t(w(pyU1b4U4NfX>%f;$8W)<__@yEPIZXmEnNyG!Hl65KU7BoN%4yR*;T zx6X%CmG|m>dUZ3Z=9+8O8ns5v`QbPIbBqNC008j+8ovzguW`_%GAp+m++`E?>UMhi zc&x6QQh*X>tX;!QxTC8`T?~ljg#Plis%O~JmKfQG?>=o7%y9FYE_F_UYpPzRE{qV|klK?1L{tEDa6XaK*Uj{diQB(5rjHf`nE9E%^6vFzB z(mn^JwZ)>N3&lIP?WY;=Uw;3(mVOjM!3ZBK08?;p%VL4`h^50G5U+k1Mpx3T5PPOh zv_kN4P$T70$@pr}I&(&29n2lNYKOA5%*8R#+6!Lc#N~;y?p+;BV@P+F)O#w~+<=HC zE&-@Py;P*!y^3CH!k3xl8?kyBDVR|i@sV{L8q4zVZ|elfBC2@=DNOwFM*$vV0GQuKTj^~^yyK7N|B)chYajGNyK?}9g2o|_f+YrJFIhX0C zwhx{^bXX4TDX4UR>X0Y@v#ibss9Dy7MU#C%fox-BAI_*25>;95+p&|kw{piLV%h)> zB&pdobR4zqOPen-JM62kum-FgY5`U0*XUen&=MfF$NGwD8oAi39(xMxqs(4hH+USy zfP3zK{ii{aQI;XJ4;1b-$*2mOUi;$OX6#2XT<-fxB44hA#C&;em{1WwpcS#3A%}f! zxIw-rVxRmWs4;A<4`XEd_*>`6w`}Dn3{V;VP5m=3an^HG!l_{wVtGH_s&5PPs31-d z&`{FQhzS(@V8q{kcu^C$wUJJiG{-<2D;3qv-1VB*Qj;~%r;Z&IajC%23_nw$$tGT{ zQqsLE4{OAK7QgNYAB(O~J>yCO%9A8l2%S>4V?$`GqxMqD?(%}VU6mw0NQ_oC=XEy1 zf%uj0u-Rm=Q6)@*Jl%@S-|+9mIpj5>nj-?A+aw&}zX!=7Hc1c3y1jv0{b?9YtZfu? 
zHhQ33J6f>n;Pwh5-b)H3B6TqxH!Hr|C5g<;V`(pb8XR@Y;v%j}3YrgXnG&~?JR*3z$<^98JDDM7I(c2} z$!nzWd9ZW;>oX7|6saaTq+X)yg2FIRrY~2`pUzeR0i|3%F+kOjn^F@L>;yKWw~kc5 z|E_)0Q*edJxmv(uqo(jtoy4G!vpdHGB-1s2bhg`(mksB+?OwCd$+X9CY(RtKO{IaU zSX7}72HDJ+wxyaZdfIw5y(6+;Sk#x$qpCl{TGwUQEEU<0)&qURKn!ImBXE8GrKFsd zbPX4cE*Tyj-FV=HZlBHQAb=kf%%Q1WqK|Y_y>sh4y*?T6)6I=c*9D6+WW>Niju7|~ z)X3vD*dS$X$*Uam#xK*ouly(6WlI97sqmr;Uan`628hq1*Fr?*$6%`u@n(r_fh+2w zZKu3GaSuTj32A3X9Xd!l%Bwi#Ewa4%zG?!3^VF)OMWVuP`5a;bnt4lHq>Vl({UB)T zrrC5_?WVl7u0-Lw)}&GhjD*mC;fsgBKfPrSVwhalvQZ{gymsoRq8KVfh59Pnf-SE& zI7dN0ajoC7g9Hn}PLE~^77?}6iO<>NcDKPAmT~QQ*Lo$1fUN>mR2zqNWT2$*RcGSjXFtNx7 zpBr{JYIgLJ{tdLWYMnu=(Ey|k!Bc$$ERLo#0j)EAAr`yWhHp+@*RTRG8pheNnIeMt za!JgJdu!KU>syg2+{psg!g8;7bWO6P&_7D`)8S!)E`SD{clA}0u|NnPU#w@X$w4?8 zY}m>rKYu<*`NN_yNI1k__^U9y%01KR#vw<}-L+4=T!%oWb$B8rflE7r9>{0d_(JEP z2d|Z;RY*}U4t45cfxv&7+KwdLT_*&3sR`2CIsC3YoyUzr?<=Yh7oovG`~X^Nb>Qjy zMn$V(?-i)3@ZM)@bCq=QT%eWu#k+60?=mf}N-L0j!zkUKMhkLa&6A91V?vRyTkt^6 zS1kAZp&&(z%ntOz<~>Yc(XHRIr3Zdmr5y5p(>)T`-pfLE#iEa{DdLcv;w9ue+*#D#zOn}}vi?oYKicY#w)&&3{*$(X z%uJ1vz4dRZPu9~3Hve~%LM1^Ug$;Sy8 zqCf0a#}GLBHUgrCVeY0t3;pz0sG8p&`|$N53O$;V{?!mE;9a&T_$5_9vU z258WF*#s+MhHq5fSYY?aRogYfgIq6IQx_eENf+N>_J756cXAsox&6=?Wql!hmWlf8 zYn0yaBpARf82MM|;-w!(UuRO{%Y4RE*k!CyB>bZ8>mmEn6;6fj-1rPDzl#G*W?RoU z4Ey0slXJ}S#yxevNnz&kSdD`3fy~vo#e^mD^1m1OXZqi{1HTRpDAWa1`OZ(fzjom7 zwgBAk^6)>!k^j^X06vl(=HH&X%AQ)iL;d-TDP#|0_)Nc7Ht1rjR838(y0_J`2!Wxt zz3>!KZ*Z6Ft_Y>IYuOOLNm=!&F{zsUz;h;@2~6AU&NuZSQVn%E20!UhBrN2(Q$;I^ zM;ZxXp`BoznWyZuthLK#H0vLX^5}Cr4I?er27Ebwy#KnSin$DvxpG43-ZF<8#U{gF zR5P8uY(lrY=tTtJ986~W(WBSkww*_ImiKx_kqGXvnrw3b#1+`;Gii{;<#~%s^??)R z&9sLLmCO~H-DOJ^Ac|ndiI1-lm9wM1?O(8x1gI1$Ylyk}!7}jEtRp!tlqc6(iwC|q zs2J?FZ6**-j_p@t~cMu&&L5V$cq? 
zmqj*g^5oJ6VHcYsd~DIg6_Hv#Yc`Vk0v+;ip!GLi!XB>jv!v67S3EREQ`Cz=aj#aC zGs!)GIr`RFg>pTRsyn=AJzq|npHGM!;(#~lebd>B?f*TA_Bm77in8qa^z zLrY&#lGaEtx#zBfzum4z3>W;8_bicy`AGnh-A=Qr7Jf1VH$~rMKSmGU#PV9(b0CXt zT9me_z3}f(YL@LfO_PFD=N;+VQyl;_i$F~5?%7MIHecR0D4c(Fom2CEUP=4JD<6;_qXcBD z0osqlg=HWJtcDHb3V#uXQ69z~jbW2~mvkZs0SfR7lg1CX=)I_*u`J?|jTZ&~v}}a` zp+!F%Q&>mB65EIIA^Z3#o!&YgC5FDP@jXo)54U4x%X9Zn#ONDqV~)5X3F5-_$dy%m z))WR0yB>v=C8r`7J-sf3Ww`c2=-bqt#FyM+4)_|;6fUBp)Snt^9>8GYxkEAE+fA#2 z{shg;fOq#&ol45ydg)yG4;|tWqgb2_u(!9qqBdeIY;z1l>&snGa7qQj2V$QAF38#E z(aLEzu0XxHr}>yHqR`yVeB~wzl^wA(j_1Zy#l!wF)y{fo;{QVrp-Al-?)4RcFbg+5tY3(Dw6BFjhieb%KUZ zHPxRzpfDRLG_o0Z>SriV)ozH7I%viywUhhP*rM<4pBeI)j%tm=8sdf>ecpdh5Q^3SR{98s3oThZWAiaKqRo)+E*HlRo87$Rb} z;daGe1qQq?#7BxD7frp+6)ne|3(XrlHi=WXE+!_7sF3(`k+3N8K3JZzT^(x(GJ_c{ z!X?!PahgLcDOkk?gX}mjyRl!sL-=A0&b^(R*ix~PN!1o^28pUQelx~u)oc%KL9}#H z_$2H#xkjI~_ej$mMZ#Y61fOpM_T~fm5{>RL!lPxC9`#97 z_X}EcPZv|(#h#O^L^82xrISbn8L-Uiub+pMstw<7%Vt4*N9a?nvgu#L?X{i_Al;HQ z?8Ok8;8U;N!E0@Yu|5^bw-b?lFQBUoV+sFP)m5mAg!@II6`30lnrOhI=hP6!_k621Hpv8F=pOb4xeB*lLf2phw?{F zaPzX~kzkmeFlzAJ0dhYDY9f{=WG@mvRpjXvg*`}(hbE4UK6QOp#U>oWg0?#4!g{oY zI1)TP85)SII$m{WrAhQbb8b&1bWBYTx~H!dabfWGv0EN%95a7_bA5Q@@|g-LSj$7V z^{jjsZENOClgg$&C`9jT=P7fD25C(Ec37CzXPHmaeM<@xF7K&Pi(9S(czRj0cu?az zLzndB_gv6&0vLjLL)2+zpZxl3mUF>_+iX{ag(it4Hl#{GaSz2TuynPglu~ZYsIvI< z;+_8VbLrs~zd=;4M#Y~&-iI)R`?=SB81gi<#H$#mV%Q7gA>OzwHFYSN_7R$htIW-Y z)O|}5GyJSy90JH>5?;X3$>2Q@c33#JTi(h{>nT?d0V4`#q@?(x98+2n|Cd+fX zRWhj$w}rts%ba~Ah{Ih|(Kt9{&{)h^Lf?B&Mr`2!IM4p0O@7yF|6R*GFHCjo<_)}6 zpr5de+->l2DPC`0-0<~BkIsxNRvjk<>3IzdkM;bgC>Et!gJS*l#%Q*RSx!f96Z1Wr zO|vx7>+bsRReG>6fG-NUIzzunBB#u0(ooI71~S8hw%GU@w!QwbP21#RT(7`8U}6^W zgF@4EuM?xGzxh5VxO_5HfhE)sk12aZ%CtRgWGD5hw*O zJsg>vnn=fZO=^abVRsez7{bXKFWq%TIh+W3_^gbsA;BQBy<*Pvjk$gN@|6ytWT$JP z`{4Er6Qh3Cr$Ca)^7^7dtzHi05T)NGm!+~ct~!RWfTTR$s^sa0g>-WU^sK?atm>Z6sv4m={Ve*+p#lf=l}jj0_!ZC#$4ryI01Z- z%~gkN<+PwBIE3#5+IYa{mqpITku_ahu!<+i7n0(3WxWd_$YXLOWjSEf7}K$R-)tYD 
z@x?I(H@ns%?OtrgWF!njng}AH0X8`L(3^%BQ(D$T-S5X(GCG~cF69h5(1bbsh4H;t zVM7`?P5A4f(FG75T&>Y?_314^`i?m++;khER@)2`Mij=Er!35$^y}P~D48g^@0O55 zPkfE(_LCqh70&|*YDu&c>@bn@-ecEI)am6RzDX%3H`PJjcwH70Mq^ZW?lJo?aycbI z1F3!BF2kMSp4ks~n?fcf*q4?2W@xAZHCHEvH`OITM5cScx{b^{s=@6Mz9$1OpoVts zI)0N*$W|Uic}6L?!uU8N>Yszb{My8Z@5|><#S;bqNr}k>xpyw*IfZXZ2tkRCOGi#; z_O}6|?x1B4E!*2lMnCPyplxD=vh%nPec#bvkRtWu=R>Ug5Y~o9QYFIx-hKzrvqwAA z=PwE!7gzWC8ne&yR<^(ChvLD_B+LlnKr#CIF`>i`c&lnFs&k=v`IijNbbiX>tXB)` z=+vS~I&C_;K2dp{jD7v^_^27ua@tC9Cq!@R9IYiNgKDu^=x>gjDq9jaeA!kOWA#@FRAC(s_}uQ(wnZ` L&wroe|9bIXxH#^@ diff --git a/test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin b/test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin new file mode 100644 index 0000000000000000000000000000000000000000..4d88ddcc5d402717893ed197c13f45baa08a53c1 GIT binary patch literal 22648 zcmeI4b!;44w(jjVGqV%7nVFfH5;L`#nVDn9Y{$&Z95b_HW@e0?IA(?z?#Z3W)qDTE zbFb!UH0O@DRJE$Qs(Wimt)IU7YOe(j1_p-p$M$Q0|FKoS+fFA&iK~b-$aZXld^u;{ zW2nd{voevaDMz|%-FF8|V0ZgL?j# zWIA8bt|OWL=~-Odmaztl8L#I#my8Gcgy3c>1#_UPC%4Ed0&Qb1Ya}2cWlRvY*OsU8 z8_kS{5oZJK7-W-}|2Lj64CZ-ZuwLRkvAm3H3uL)?qUZP8nRl9TI8XwEuo}_ zZ}GsJ{Rf@oBub0>LkgVuB{l>6(=@gfq&Lsj+2uNeuE;5Sz?n0G+#>D5Dan!0X z|9qxO#PdYxZqjbb6oaTonJ$soNSr9-li~?>S%3Q+h!LR$|>ZLHUX?Um@Vkj=AE=f8efJUtU50 zvI7A|5=O=hz_2G{-ma7D`j20CvItWb>8KMVWBV9;$+@jGnLh?JvI3)T6zJL@=BqVX z#A=nx`wrymTCkv!ww)jnkQ8d?-SL5i5^oeDW|iz&AUhi=ec$Ev`nq}CmZv<4Pke4G z>}df9@+$pAW06Kf5H}6=@hG+6lSNRX3!C4s_{*K>&rVh6nvaJ;L9_z83JyXp1pm%<22Ns0EYoi@M* zl5K|V+%mEB9cQ$*tGkYS`0xNL2eFH=*as$8F%<&fQbhZ#n7za)w%jg9N6W%Ywy4?6 zU6~KJu|mjj&(R1JklqcpJ}taSy!V>O=%e&Nfoc$qodP6$m3&IDiV-K7CNRvI)ST8P zM(y#N_I-cxEeiW)F_*2Xg0~vJVF`O*z9~?;cj@%++-J|Tey4}w2i=(=6}lgV z28v>7wHhhVcG0Xe!*s>R&bReFuEX+*fw(?J(p>vvHW5wEd71BBLHu=+^*Oi zVa2XT-T<#3I~Sjzr@IjeC>85lmM#Y;Z*ioO4CykvDPftSa8R`flZXwat0(gjFBgYC9C?<7&HdnBWZkS7fW+jfwO&Y07dR*T$XHH~Ex%{dM^oVA|PBcY( zFjaqxx`z-!2j8Xahh05LJ~4N+U+aNM2K2YA$?pymSHM=+IhlCNb9zE!5Jawvo5v>D 
z^xSOaYXek>)|`?P6tg&(5Bl&J%)R(P};SQ;B*q$CJXF5^Z5GZ=u$zV9W z!UItd`E$bjl$e&!t)ga|POHTbsI88v!UrgeqcH_;Gkn4mebk0%OWW442BSAhv}ZAc z0nXm**Zyo4cxIog4YE zz+z!0ybbW>&X~+7LF58fHwW& z5Q6Qjg6tt{ZOH$Vz`u_FLKLPhUYi+IGXS#N9W6e9En9G`cYA#+?_lCG68!040671h z+CNO;L(Al9qQpT;8pTuBWH>#xO94HM63lUb-<}`e&l$Zf+WX`JKCT9u zy(uA;^9xKRt>x_VIE$hHd!_Izg&?WqXS{3S^BF1W2kPsTV}jc-X3TQuaduID8&!85suAEQ1oInJzvEws!t+aq;N_?( zia*fX)6EiQV0Do~S(`cn z!JiYxcg=2x7Wgoo5W4xgM7KgTendoIS^11&BDgXqz`tpugc91a4`Pd)Vc8U z;oAWvtU|Ug!B5O?4Y|~d7L!kHc;Q=`vL>RwgZ+)F-|=6a!ul*XsEe~rzU%9Gs1A<{ zMsPK|MjD@dmK4h&!W5~^synS@b$J|O=k@!#f2WjoD9BeCQi#6(LJwMjJzmY`a`NVh zTY){J^0xq%<70nn%WWNf2SI60iW0XV>wF;J5MUq}2r$Ss*PWgwVU znPO2F_dt{x+j(AYs0KA`KFFwyUg1_2sFV~$T5hh(4g+wb(dm8T&P8%zHZ3{Jt2jgP zi~$M#s)b**@T(U7SJlG*1PVtN1%nlBe82$qM5NmA>W=8<9r<9Q@JCIv541czE!+KH zZ{~hs>n}uM5i|1ibu&sLp@~{@if-+yyux%%4#ageL>_`2npEob|8Ep_97B*hNm>6< zm}6*!#ty1Xus}9Ity+Re{t-9L6WENVtMb@hPjBqH`^{QMv)T_ZyY>OtKJBxzUt#E{ zL0f?IO5s-uzf$;rFNKFJdThcrEc=zc_!d0dYhVEpY9{D7b4d(yR?I(Kh2enGSKg)_ z=UQI!{qvsaxAuV&M~&SoBM!Q(#)%_QBSWwVzftu&{)H%fkaRGj@zQ(DNcPgY>I2Qn z*xv-GOySA{w!x70O^9m#PU#=k!hhyStPSFEHc(#h-!&+_8&e2HW4et9U+YUr^D#dnyEk%`ewF&vxDba~)}y z$qseNWYw9%HE0*5oMu@ssk`b`tmQDlspC~efXKZ}U8^;+VdCGLtt`lB$W>R`jy6rQ z@&IMxM?t(RdWgSK^*jEBC|t4obe`AC&ca`@M=A|AFk;r{4_8iRmYZ#+p`JAp5%_zh z+CNO;KYKgZQ-<}`2=k&YBAsurd#V`Obej5VD#AxlJULR1Ge+c93%^qMmBRnd6yD9r zGq95W?m}EMzO!De!cZXr?OuFCC=`iib7oKOs)h8MzQ5yNh{DIM$_ugwVZFAIBoA|Q zXOoS6Wc*}xCffCkIQzPalx1KNeWbtA`iCi8;fIt}RWR43!qptIE01{R>zA_ZQ;hrq zhWk#+i>``PGR2)W{9Av(w>9nN(4q_~<`?nA=G{j! 
ze3L`U%?-mN`~IEs+Lh$YkAP=?tnNw6|D5*KFMah(|1JGe-aeOlWC4lR&h-62`e!-1paw#)H_cw_xcO`G=fzQ+Hl(X*u1y!v?W=T8F2;1c=-hfrUAx#}smdfl7W8k1( zf5%(J^#bX)$TOZ#4}>XIq;i`kLt2D z0iucbOYK`SE*&{qQl~#I)PHm5==nRH0RJuO{7DhlFM0{5CEpTo?P{{HKk{N3VN_mZ zD%H%W(c(%?z$KUqPNBY%>9x-4-@4A~pFk#DFv90sBDQMp?qN>bIt6qQ8U;AvtJ^k| zXO0tRu4f+LJ+pAVHc%VLYSW=Vl2z;Qqi?@`@Sq~30D9?QhgJm#IN?#8O$EvCsRGNq zRFUP_^+_+$8~UfeiN11&Aq(reJdpid3hUPKB;CVBR&T^EvDf|0>3EWfF6c0kR*6)U zhz__&b8Z(a+-o)~>G9njR{A4zl5{^;P?i1u!Tw-6u5ZXfiOOe+=`e!CqQG)HRV zrOtn*?#AHJZM$IS@{=9x?0-V0KYLvKMW!9uAV`W6_{os3pos3PB$BQ~tb^0{-)6Qt z0X?%P%QgN{+OL}FwF30tw*u6B&uqNBtoRb|f=GjNmtBVKJ4;f(muJaV6?DCF7_G)X zqXP8Fk)%$r`Rx;Px$6s38?i6K=QX;(U_lR8!{Lvgbt3J6L!lpgsH8HO#W%#WN>Tbv z3u(#H-Dx+9McfD5pRx^Yfax%VXjeNJ8k+fY0$;7H$PRbF`8kMU=Rrb2$F}`m20+Dq zsg?0Tj*!7`aMDM?(dq-Ab3#?fvO!-aUC?WZw=ICs8hXavA_MnqC&J0q$)4rM0~d<2 z(^fx6<4BlHt+)^YzbgCxJ^+72c*CYkr6RLcLgtKbO zU$lF|XBp9kVXdEHdKl~KKz#WU0z#-r`A+L?0n~jE;X8MPq=XE2lapiCZDWYxaQ??aYJHa%r|>Q$syc z1tJjvlMg@h{D8L5Eyd7X>%eT2&Zqt&B`tf!`&n5BBAX*;>8>H5KI1db=#!MevR>3s1;10%L`dq3 zqJM)$=oc3y^itSq;z7GlXQWQln=jIBXq}ikSLYug_MFSfsZnn^*eJ--1hZ$>;u6uv z2=nS%PoWFpd}@Ox1WTEM(%i-MMs7L>Iz&N57%JvL0ZPz(_(Z3QRU!;1)wGcMdZwuj zkyXau%mi8m_MYyI3`xgE=RPoq>x17$_lYmQU05CUjW~=O@#!VkY{n)DvPKP#Dm4<) zlC@u%j+bK{Z?Qu-dc(=cVvUE%@j|l}!YDy1!WFNU)ChmHu*{`bWT#G~%E+sfX~YLV zaZ;G~6NhWV&neWSwHNiFLCO~2B*_3HzNq|togDHXe@{vShden_QGQk0{W;8>@ftJ& z73Dan9ep2A@gT%{WXW~6vy7cOymX~2ooQtdUjK(_`hXo~Gp)ya?{U{g0hR8LW`s@0+2ZdZhaYN4MD zOD+CcnqRCW;>wzwO-%$yd6~FXOp4cSQwfSQeI(a{cyY|UmrOw}Cdgk#usq<7{D@|> z>_5|4S=5wq8LL*_dFJgf(+vXD)#pb|m|KF1+i6K0dK&4_YfReYY8`IBQXp>*FNAXxS{Qy+#lPVM03U*S>O05rE0>OP5qRlsmQ}0>$9@n^U{#f zaj-5%@gFS-1zeycSK*6jL=f6anhT^zrNTYuOR%`@97*T;>~%<_4q&v?1?F0?j)$^& zYATw9g%6vEnur_t{g4(N>VjGX-c;9H^0ao(u!qT%J9Z2xZpzv~e`U8G){E8~Y^ZoD zs#JXf?43MsM{tXqtc`gl^kEWY#*e5w34({5lnSE zk;>J;1!wE>DR|9atpl%T!GVi>OR9!NuRc9Y5JTmu(myb|;SaRs#if&w74MBq_R(UN z3XCl}PiuV3`)J2JHSGCEcM;4w?+8N`uuoq>Rg40;Ap7P}{q}0(4lyDiLjpGv{H6`2 z9e9ps>6v)$KvV>&!f5M;!&~m2SS%uO`T+O25C|&Da9V5Q%V3;0*K{SfMJgu#^l$+% 
z*bBWup9ju_>H=hiDEam@4y0%9CZ|)EsyUykgfobdN!zFb3wlVXGY|UTCz{KWtpq6b zGuigP%Yy;O($JAQ@alnFg3GHk>*-&nVr`Cva z`e=N$igYs*ihWOiLVOX1lv!=~sU7HrOA6l2>nJrgm3LBg9Ki9U6mM{xkpGNqnFmHR z+SjOiwmiqxiyB5)Z|nH{*wkcR5#Y?Z5*E0K$YUJ5opaj?(*AEsStlVFM(a|v~s`|23T11p*54f zq7+2ybz<3qC{Jq$3;u0N1~X}iX0AFF)OG&s$c^XeSYqz{oXI_Tk%&zA^{9DhoS|Q_ zp1p-mmvSEthe&u9U4lX3qmw_Mfj59$jatNi4HkJ5Leue5$dLLaT!LU0aCS9EsZ-A1iOA5m7zDyQ`z z$n!lJhi{&?T|yHaj+X5FGji-{=YWy+L@}4H2Vj<9Dxj?q(hu{H@(hqp10fwa>06o} z!flG0G|}0J<7=ANFqN8})2~`cn@SH!0S|*sk4L>j;cD&LI^Uf0-Mi%5Q0#{c`0g*A zFqzP%m|*70X76hhCgmHa4OtEx!n^}`V$7d~x)q`%CT6tq63t&cvp@HVKYO%T`C!)g zOR@R4Mjl`0Rf=}xw|sH-7wT@GZ(k-pE)^9$x0rd+%JqT=jW~J!w{_y*#U|LFwVnF2 z?opq7S?0+i{Au(L!qjfN%f1$@`_O`>a@V%}TsR~LbK(WR>+d**WFA1MFOGV6e%rT!RfA4C-K+*glF!?8H6 zT%gcR*~3r_5uAb3ZU%i^s@K1e{Y{=;&>L|S*BKDeC&@TAG_@SQ2|dGeie3j z1jwnL>$)@7nL9+G5?b5@;b8}Z6R})uR{%~W((*i)xpP2O&y2x!RW%Q2Suk43z3ALUG3jZdNWSX!=+jK0sN?8wub|;9!d4BDp#+ zSCdx;M}|b+;g7fLY<_A(CH@KoTsQoz9QaTgtuPQ4w3Gz9b04$U_#-)2JcYon;*4Yh zel?Rj>g$BwkN6=T1g$7?>QpB)h_Tw^dnNJYQaRoE#M>at^ z)eC*awCpt>xk(Yikk6$a4B=6OPEJm8VWBeWOXHiq@LVeHSbzj_J~|w+=EMA6GR}9H zJA{`l{n0@~A!Md7ws`^$xF0{Jg|>HO>{ER*YBVw()i!DZ`#E=(L7P7A6?LvzrH(waI`mLYUSIs9zkAl zUie-EqUJVy+(7prUGSOUw4X@z4#t&F2flECzl`8}c7nQrQJ7k=QFg~WwI`na=h7D= zH3O*+%2qmq-4e6U(8(8+pc@&X{bHk1pRF0m5ShVSO}BX&?@^RsaR!+s`9RAT#d)N6 zHl$pbskO8&lO_JKJ$Ql{X}zT&uUx$8-y>dSE62TXX6 zdLah)zA6z+xr;#AZH<8<7rDKJv8+t81r3V>%Gb32=DNKP7lVrYV56J`dA^#Iw*iFc z4JXvl+NvxqUENW1=Tp|(INMy*5;QdMErqKl2V(Y!vRW*V4>tDOL`n&=Wtt)CBG2~z zE_b4Cg?&DZ%sx(!?|~U6H|mKL(!%E3$n)#oCq!@9BZYV!TAB9kt2mLhH$2a$yv|a| z(07i|j~S^{OlP8zkE&YUaflN8y6+U4dE--etn21hv6=^h?qOsUV}wE5DN>mE~a7}yA&Z+Hdi238bS#*V|j{NIOa?j@j|GGFA8!$0;m|S76yr2qrteuff-5*6+(R|J8^IP_G>`G7*p@(}ZMlfibtdyu0x7_y!&};@ zD?HNSPn^+F$a!1FkV2w^ue8p=Rb}Y)P{NMcEo@DsWj)CC~mYodW zkFzPLit>coV*0U`JJ5HR^N@2PzGFcw`>Du`epxysz4mQMFAKoHK1ER{4m0}B)OhFZ z_+nRd9%O_X4dqYH39}O520N8amW*_{`%n~tE`e%L87z)Ul0(8mowm@+$1&@vyBNqI z#)TkOd-85^5SsTEr$7zqvcRfkD+|$t+Sc9eq#a?-b%>{QoyvVh1A}`Oy*s%|$SSU$ 
zP-2IvFk@|#PXY8oa$P)3$B#02TtBFyoFj1|wpMz*w@`9!%GN>0#^CZ?A`F^|L%OY= z)6k{n873UQ5SsgbWul>7@ZLQ?c$8rS+-Asp{L*SSyRaDs`P>4txT4H%GFi!KB1z-= zr=51kYFJ0(eZ|Lx9t$lh@x}3y6YvE>h-4G$5?o+6+S_>eOGgh8F}~A zG$x}PYC12j1O{T_;8VsHNudc*kW3-+W*Yw3(s228ZQ?Nk$$q_wMt})bK#fUXXqR1E zD8KxCrFB7`|JSvPoP&xkQrjw>Q43=5TGWt-MW-0cIagY*Ke}%=99Dx&gs0$ta*?qK z!zb)&=*r`0h^|L+c?UJoT{%Q%W9W;U1&pFgZld1Ou0MV@#crctEIjW@OS378ERtBo zR5@9Ewnq-JnU+f{$(ueV^7iY>#&o!|652+D*{ObNhZIRhZlE55g5z*k?&8Lu0HcLX>TN+i?|hTyT;;zoaP z8*rW9@LRvnab(ezx05=bDx64&8}_d+M5bn|0amzw>Bxv$Mry&Pko1i)=t5;YfU9DO z^9oI!Nfa;qf`RF~WQTFx?=rX158f(q2$k3G*r|o?lKg_K*}MLICKM0h!xE2yx-^}c zI*9zBYI2mgJZun@baOz34;d7l^%a_aEv=K~wn%6#6z|1pldzQV*p433 z9U2nvdyf7yR~>9XO55;&v;7+*1c?IGiUy&cnH7!$i^#R-cVAcc7HU&)`~cPMo1Y$^k{WtA4P3LyDN%9zsNOs~(+qGB%`Y$I!h1IFl3CMGr=A!N(GIDl z%k&CjuB5vL>;Wjriy12da^l~9>G?8CfE};`dg7;B?`WmDmz{2fi#Hm+UBCrrVDyua z8f`u~ZSJvj$#Mu-NBQ74H_hqWNp6N`V{EJVP1Z1JMwU>-O-tMrG~RCnO-6C&Ipvq= zS*Tqv;4$I!K0PXYKW*4s7~9G*D53LPQwG?i+50$?+0?PfRe`*AAsu!zg1}wK#SVcp zn)34TkMPK!2(oZNDyWZ1Ynj<&?N__s)Nj6N9EJWwLBgF$7EhO8DaMpF9; z(PJ-4P_tx6)~V(}_*J>hppvETh2e>wZV#&5G9`fsQIwg2_~-`9TM O`(GFTIDr5A$$tPvo&`t% literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin b/test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin new file mode 100644 index 0000000000000000000000000000000000000000..d67642b43e9810f3f2e1acccdbbee81da239a5ed GIT binary patch literal 10312 zcmeHNRZv{pw#BV+w?HGoB{ULTg1ZF|?%KG!hTyKj3GQyeNr2#uq;UuYx5f$Z&bjZT z>V3Sb^KtLrJ+o@nSRb?YsyS-ZsI}&v@NjT&M1QQ`0YCDz=+P}?lRyGf2x|mN_zaRw zWQBE~dt_F%31bP0y21aI3C|BiJzz3cHi3Fn;~{G})Kw^w>tG|_?Q0uq3G#_l(f>~T zSAMm>7zTFSR2d!h>m3DLIukOv*hkcxJ@e#xrZuvF+?gI7B78uCGJsOB8MD85GS$`A zb)aRuNZ(cp8?(2J7C)`@4cEcVF$7z#zp5=j&FKzEI&%#Nct6ZQ@p@P_P)q!}pmr@i&>i6ud&Z1wizdC>qbNThm)P9AP#%z7{wy^7wtFyC5NCvP?;6i#N4 z?$4Skm-j5j=5x|(LZ&pizA?T`tcdlKp-cZ2vK@FNWnja7n!@LMlqHdPBPJOt;KTty0s_}0@5Wt^4B$rvpUFZ#jeA)(ZH!<~ 
zK+abC&sU2yp0R;7M0ah-fl^$T0Ft?Ho@B~l0u8I*mjFOsAcDD!xdjI>`q4tD_vETM z^2b&lW!54abE<68Am_kK0ed~J$dFcUVEnZTTNmPdogPT4QL}1rU%9D+2swSz4KWo< zrE%Vy99SyzLM3KS!x@D1xs@?ct!N<7%jc#l^HF-TzN@sq10E=(@qh=C#{)=Py$|rI zuoV{CO>-^n0NA1;pg3im61@W|qISxSEBFY*uOFHxlNnf~z$TA1S|-cZU43Y=(*tFJ z60%p2v<0dC0U30mu~}Ftm9yoJ*L8W@bBFOE z#NI{fB0A~*m8aBO3gA*q_nefo%qg+bHeXN2!fd{z_3UkBfPjTc)M)?F7&4IE3#~c( zL!0!#6}5S!{78vbI15+>38PjyGvclJYkECkv^$**t7C%pub&2YLuEHOyz6EBPFgDd z+T>>Cyo1G7K>2~CQ`mlAX%T$Dj&Ji;KgR*vnHdv)5Q8p`YDJwk9nfjf`g5)oGyojf zsZQ$h9cn6V!q5i8-8ACXtCl!QHUX|;qsF|dA@)W|tg7Xr*dV}U$wtJ&viy3^as;wC zjt~My^XM5=nWEh_?%sPqHfJLaeS9d5JaKtHOqkgzk|59kJNSJ@+hrZ?1vFBG!wP&y zY7gPByE7=P#i5>rB>@$>Kta1{%Sp)5 zZlez&Juuj=p|~$vT7_8W-N_`C;OPmAY4}Sc(jp$Qwx@RcKu4fTyxz2un3V1Qe0bQe z2!Z9u*b8peH#+#_3%6LUqt5I=C9BDbmQcyl&m3kmP(j3!*oR4*QyNxbuj+-GF?!8zNYhVv!MD2s4IxI`5}yQ; z&H=IKI`EW02byx8EtegExI&j?($eaE`=kiY@SJ771^Fwm?sYy-31j@W>OTQ*gt~f4E}ZOKkoF8JN@HM|DQW;@J|`O;d-(hh9!-$m7}b3 zef4@^0HCacS#raW?0Kw#wU?@P&!%D#$Jbd4LesOPV)vI?lVMMQ>?gqKc7!Dp@ZpNC zw6nSHQs-0eifq_s#flK~5N@%Fyk`b!nD2Wtao`-KKTmZ@z_%f$PT?5FOH@gy>KXw! zUXhv|+kj7)!x}if$g>ow(~|)Ewr0i3kY%OhrS+21`|FeI{rO}GZ=fwu1lyxsi)PmH zn>54$`YWA8HofjH-6c;6t5tw%6KHM)+>mh8p%D>0bIKiZ7h2gtchNEO&8EZTpODK_ zLY%Ay0}Gi|wG+jZV2lx=mS*A(i%GE`CAW0X&>Z0GfMxG#wd($IC!{|R{_pN@CZKEF z#ksb?)zzZJuENf7HAF@mI-KMMMfZK>mJn01Pz2LL8-3O>xM94%GqKTYlD?pS%a6hSIP|IP5fE$VF0<|p&mJrAbhvsc2txksHU z^G|15)Pj$c-j>}~uM++d^>3>GJMlj$>Mrq0qg-EXinO58jzL>(_I-~FB`Qo?z0ArF z<7>IQ>AxBNw?%zumr+NZ_Jz@+Ai3Q~X?wmw_0%&1-Ixa^YD`d0I1&7hsDD%a--!r+ zl#Rhy#_-AGdH+ua{K<$V5h(YOH62%Z5%26v6h6PQGCoM|x$`WLvIbm;+*z4Bq^%$k5vEj+eeFDC zh`o%Wm$?A^=q5YkFFj^zL^m&vnURUE4Cqy$yHW34ew02LtJ0NUnfM5Knjh}-L*Z0& zI<3dBqbBsCA{2a#m`h!y2HwA+mP9Y)De7T2j)QP6a}BM(1eGt7D4V5ua=}`10W0 zJAUDtrlx0hB#Pria`q1D@Ca$VuJpW!3W&))cu48rYrqZp^62|yv_|qVA8ThY{&u@Q z3G<^GyIoJJ4qD*+raJ~<P#HKHhD3Z zVJ%eeBXk1^#w&{kOZxidTw09zB9C~YT+d_V2_mqdqLhE~e z&z<8Jzk2vkDNMSkV#8gXJi3IgL=Vo%w4E7KvOhXe5Znb{M0&_qACmfWPuW;@PuY!? 
z_F$xf=j>i=3*9wSkj0H~eq6>Y-Orfi`b5k$?Cg*_wAgD3X*HR0$eDhKx!&m~)65~r zIulQ5aJ`))h}5ic3kFrCN$>jM4=>PEDG$SVoWVywtvkf-26kJauLjD#JZix3qnk=x zyi953hgiS4N(#Eblb)w|RpURgqdV1nuFJD$_gxGf%B<)^Ej;s9ka|zQa8_~8ylX5x z-3~6LAx%_fkWoFQL1OumBswx@aNT_0zkS=8il2y1JD06Qn?u@_^%(rq*$kD&#;LP` zp`nLYwk^>JzZhAaL%lEA);tPs29_OaBru{zaqD{Pd#P{0weL-cQ$9wV*~rV8=n+k3 z>to^$F+p@P>>x%L)a2kG5TSUCjfI_CPDqyd&}h}|W2_vf3dBk`(WtaI!TWs_AiF1C zR{nULK|ti)0SF3AOUT97vUsaMHzlp*oyqM#UfSG2h*5bVfX3#sF%&*z;6y*zO+L~xrRu}LDbb6w6J5VG$N(6m=kep-v&L)tJw zYnQEvA1yho?NI26&ws_@3u>#kB}#9#rdW9sJKhH})!mGCFEXgUkiNgpfhHIVdb{{4 zU^ceUAUK|(xfh9Yh3r%Mb>9|w!+(0fW0NCABI&*~KLni9L1Z~%bQe57?WHU&D3+D+ z7|`x?dQ62evaM599TT?Pe9|gN*Qix^Dk!)|7l3+48Z>7YrhJ!53-@X|t%x1u9%YAJ zW70P2Vq0COJ7yeylAO+^_t39n+TyJcbARfcrb2oJ?eKENVlEWQP^UEh&u!DJ3Jg)&1ekiO+??RO3fPi53XW`0dYa)NCcxK;WbslAU?!6ldB%Sar7fXZNTe$DgF*4|> zOPN8SYp#t=SRaO!LjJ~R?%0CRTy-SeOk=NsZeDpwv#YazNf=e(D<#T~i?<$m`5)2A zjVKRB?uo)1zuV3&&1$ag)T!c*)767JuTgTOuqs)~JYXnoa5W@m&!6gIo0`bxB)<%K zP_u{`im(RCd_5;B6<{W%apJ(cMboY?$-5E3tX?P20IL&uqtSzsRb2c0EamgqZtlLf zu*}Gme^23mGts~?FuIz3q!??+{LJpZb~P{5jwPg9nT}4Zr*&`YPWaY%B%LByFl@$C zdTjZ4gS@0GM6qU8DU;if4e=Q7)KHj2xtxE!svQFxfJK3}LRm>|J&|uzd4y^N5pi~C z$hZEWYQ7t6!QPl)zK$q~@j>WUP4@`4Zj>nK;GIr8ReoUsQlsZ%`na$uw*yq=l4<=I zu}pOW&I>lmN+@^20tfG(K<}qp*+$|?n({(N@rx~%KAD@^`dj>zr6Wd> z<9STodyq^>@h(s`;&a1n{fR@{6H{n3RX2*2`K;T=03-DLFUuwXH_N9Pi?d_6IY)Xa zf0nb87fJXHvM)0$mJjODkK)hA%xl-%EQ#mz(2qRgc2XlRFl9mjcdjomRzPWr#0&X#*s=Rz9D<_T(;cdFF!*O2@{i{q=?7j z=$vI%LC>p{6;uVMDI65IS6f|5d{mJ7cy5!7)Nvz2+(`KJN?Vh6S<~6thYl?YYcycE z{a?Asn@#MUazm{p6)YOizz9TN6FK6yAjbh&Yqt71CN~NHbMFnGvW z(w0VX4L;yO*kdnsY4kyz4$b!A%)^AaP4 zMmuOE=+lv>CdGxz9{I~id3ofR04(+Res(q{b%eXf`6<|Z%6%5vWRGo(2j;XuuhX~22WU8fT2R;o3eu;-M_&@H`zXgAh9eX5m*Ma0iOei~ZmJE@|?~p+n zw``9^985wdfz`zy8B?@Deo9UjYdYeCjVj)8pxfQ z6elx%TJn)2dX6011f9FtX?vhE8n9;4ej8$->ko9+;^Do5YaDS#o=G@pjEk%8aj!u% z5(fV)q*VE6Es&?O{cgp9mnd$P$5Gu%qh*}<;>_M~&^e%*$+1o!h>B$5FE?ZY5W5nCwQ2fP?sJ5l5JVBx#PX98ItX$D!y40jrK-q%% 
zwMiYVqsy)3d2>2tb7(l9(uU!?Gu_)n?D9@P{+gX!-8UR+0SIwQ{BZcv@-cf2ir|p& z9ak7rvwq1QWCRqG^j0Ri#l70sUsG}VWHgzMJ`;zu!f{>)&mVx?ur$SAjhLGxgZv~a zaKzj0utXz~36+;IFB2wb17k9OS~EMH{b?n-=0W~#<6D!{#Sl@-Ntqur|nI+8WxwxxxnMT&wQxUZ={ z#XXH^sHu;1^s<;TMT0L0!efECLij_ZNRwID!!aa+kU~wU)^(z?y1Lh+@skwCcptEB zs{e%el9<(%)k$UAPX)~d3>7n|&Wf~5;+w-)GWn4yhRBIEjlFaU3-)p7+ql||c()2V z5v_wzUb}9%2%-f*NA%sI^WuCWefHW?Q|R_z3HNuGO3~l#1siV#DSZwU3ZxI>FHjqh z{QSmjEpJr^KVtGT3tmS+Y^0;{^A+0L18|zt)5$g(3Sath!}oZMQ40Y)pgU(cK6DbF z6Z-HO1tN*ALM48MYYK_8n5cZ&)+rs{>hy@Y8Hy1pljr*l;?=bvOPrC#Y9r#XyS(?- z*_)p$P*8+_b*GY{e@*D&Gm4&_jFA5C>d$^WutX124ZoKz4Re2H>vNql}9d+mYBH*L-^oX zLIOR9c9q68wtumvQ;)Fl&~ZJbhGHdF)Ian@BXI;~)w_W+CjrK8Hlk#jd4oiihHVp# zoEUfDw6RDSbR!?3k4!i=uVkTlTIoEI>!_+YUcmX<4@A&WoHJAXX=gmdyXqkT+E30q zDs&<1@qBcneJ1gv$cJ*w1}yKvL)nW{xHsu~`1n?V z^>|9&mx?>2yBUL&vf{HiJ%hxmmk3j)t9t6)Zo7kM=s{irfZ2N4UKBo|f#4Cui=lq) z)*l{)Umq8xlqZhP&@QwtzV)Tag=G9Fhr57`12P~A5xn9|c@p|Pz{-71VudxPr1Ib*1Vq%T;fbZu)#u6p{dDO(jaEGtVEn9om(w+{RY8W>&OL*h9O3fb<%F%BV^y=j%nVHKUK zrg;Hcy;~P7TVC+4O?Tu?irWt)NJm@HMNTaNF*Yt0aIba}d1y!N1o^y7AXp@M-f*Id z4)*nycdT;b^Ggmw_w6(2kV%X8XgxI+h1YtrS?J4*S%I7>cK3K^hkgO|EgrKcs)OxS z`0+QsZcB(7*J|BR+CXO+LFy08aoZPrZTV$*8((y#hc^$J3n&$?iMgGgwCYhAqIF-) zIJ~C7H8`Wwp(feJN2`Ap)F35ecZ0cp67@(yGi3P=IDbZDQ?X{SJyJ|ZLjSb&1V7;!S6__(%N8hD zS)|#pt8Cqc%U0;@Mijgfb1o)E&*k@dCyKz6L@&V0!Iwu^RXd%OsH1hj^7z%5O1e*P zQ&L)7>)LH&C~faI+|i)I#m@(=QRkxcIo?x3UM!jzDpd;eI|M9|QHm_%p7lZS-WkQJ z!t)h`0~j8=!Fd&1Et=Zdp)g$TERE;H?{)# zz+TlTq@M+$58ABaLGz%jun*{i36CNv5+9*fmCGl`)#5y*N>P$}Iol?^4|G7qR46M^ tG6tALvciP4JR8d_ipCJ}|4!=t-~FF|W`76bUzN`J=hyx&dw{{hI0b9ev% literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/DFDFB1EDE901241995311122C25CE2E0F12C98370D2CDFD6A75236522A4235F5.bin b/test_fixtures/masp_proofs/DFDFB1EDE901241995311122C25CE2E0F12C98370D2CDFD6A75236522A4235F5.bin deleted file mode 100644 index 321b39faef07e63b03f2431b84b66ff9a9bfd082..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24899 zcmeIaWo#bHwytZ&nAwiw&&E$I;2U 
zn$o>{-sO_Ihtyy7S3Ua2qqpCx8o)q6Kxls*zZ>vhM~&QeCO$$!Wt4udLkHOFCH)>v zWg(HJv1nZd++inB4x}%#oJb$)9OU7NI9#aY=@6TNwVv*nb3dYVndm|a zTWw7;;H@jKoYF1n3TBFO4H_E(eh={fWRTwj{cgZbAhEjs%CpbHT%yI>e!*^=osKh2 zV6>DNab=f2hl=9R|JCom&fEgmYB3>zkpWESvGn- ze^hkQu^I9{+UtW{zNMwC-gl}ER*Q>YqmX}tV@)(5FMe+QwDvvI0~X9|N;in*dBj!w zOQ~*|g^}h5Xng=t$xY_1LFw{2CN-6FBuFvHu|;YPUh*)hQC>o;yVUoEI@xoVRA^nM zYaU&cGSshrA9V3N8Z%&JzWbaAX__&fC$e~)r1Qt!@d^g8+t9#*0RWqV591Cen!uwR zuUP&imXAeT3+L!eY!#xL`>Kve#{_EW-kr0U(AV zhDJ1i@Mj~=p0k_Aklme}kLgQP~4b2!u`QIhqzg60%)vT*93Tc;@p|%9 zp=q*o%fX!hKE+2Az%P0;pS&n^*e?RjxmJvwG%x)N`tU+v04lpy}*=xi`C! zOz3Fe$rw0*+7+rXBcesP{|3)6M0}`7If%km1`MWJIxSeqkd;Us5bi{1Mrj?Z^7KRV zVX*WLfqAQx%|=`S7CAp5yS zg%Dt~WZIo&vg&Q?(=Lx`zp|ctG0d8 z`dEYL`Pevpz0Gg{v3Q^IOett-^An|1Fz2}~5sNgL!|El-)DPyJ$+0&2fSl8?-G>g- zdF6-duI37v`zDiGUP36a;hO+dFwXg1djQq!wuX%Yj_keDFbUpx8LVr7f-Rxt9TW2; z;26bPiV?t5O6c@rCSwuPG@k~+h##GwGS-mtw6SGIpUjYUy{imxk6}VjHbMGjqWTGO z4>pntrbp2iy=Itra{gq$-W`<);Ac@+*c&b^gRZJ|Hu;I;{ER|Bh*$@+fQh%|rPb2M z8Xyy+J}t#7WPZ316!;X(z7`U7#V9MFij2K@57#_uM-7lNnf%-oAb8$QV=%MI0a_IG zIB9l{Ps!z4S+`B4(PjY9R7F+d0x3_RFad1S#9;|OX@YiSY^zxTQ5z=PF_=OEpuf{4 zgo%+K6*jd)01@@9*xpqo&-*>)}9-`SS~zR7SlhT zGoNh0X7PQ66#{gl!cv5wr@D57CLkEi;BOYR2UF%x=?+gfKrb7|HtGVd6xkhIv%mxz zJnAElPcOPl@h&@t)}28Q20)Uevy@Y&iN2TgkO0)B{Jd z9B&Qlg1w>YE&hWjymZ=uKFpvdsX?nj9Olv%MriN}3%J#9*ilO4bLoH}4iIS@`R}y; zHia`7wo4474xGsfFX*^DvQFA%7G5rt@i?A8x*2zx(8c=n9UWZKdYN|)Nco4X1(@QN z5lZ<6q!ZS&4!B=LlK{O__?^P<6#fsUupU-ChiU`j0={wb*Fie-grA8!ART(8p*Y() zMY$tZn&5BreT)Af3NI`l16LrX$sQqhW|}6=LV8;mFXHgbn)MJ~>1R17oxi2@w<)Xw zY|ue=L5ql1xPHR6Ui9Rss7W3P>&FV|vu?XX3HYT}fy2-@szGpqAL`)BYR*Buj3I)5-@sCDDgA8uZHIu*iI)ghE!^U6NoyCd=LNW41||6?5q z7VPv+TPnLYZ)@Q?E)9u1Kld+NLff}Ud5A+D@1=P_7E&XjF< zJ6^%$BYWeV+T49;M1V679yYrzNZ9qMvv>rZy%QfB{d^NNLZ9~I+Skm*XIoY(a$&2pd17Gf&}s>3I`~p1QAx4 z=`cfrxKe2K@&T-1llDwdRLA-0>cP;#`h5x5&;lEE|Im-3$1s#G51?TQI z1+_D#d}RJjv9$X%yrEWnkY9wT04)FM(=TluN}{zqg@YjpXRfSnbkj)7>N$`jL>*GT z#4vC4eT)Af3M1M-EgC}CeK%B#^In#%01cNVH?8Tml+@v{k6+Lm=zUA+Z&UdDGwvFh 
zK5tG{nt4OQFTCvd?#l`T74T8BVOmAxO4o87rKiqDY9p8LKdiLWYkWcG)&l{ZGv2HF zl?J|Qltpmw6n>}hJB9zT6h_HDc;c*(Rv=k~H5h7a##O;K?gW11jK16uwkVJ}Q~)&4 ze=(~O4&{~Nj+2~0hh~t37I3sF3rj5T&8NIE=vN1b|MW8Hy43KL9$fH1zOxGuIce@y z8nf47G)f+e9vgxB@P?|l_z$A+Vanl{+H3zQEzxWHnl}U^?O+Q?RT^6XpaYU{U{X;1 zEv3Ir;XnDp>%&;AO{7=c_f0Z#6Ea~)ba#;v>jP;SY~*_9r*_ltTKL_Ocy}cJxsJpi zDGZ5MlgAfLADS}RjdWO>%HPk7@(DVfU7oAVvc5KrR|=p{d$Cq}!ayIeERvjD>(MKO za)D$gGU4kbL3z;rI>iP5hN`#t52CPuDRJh8DJdS`WIZuSuV!^Yai%)UhYghv95_1^ z>Es)4DgA8^_8RT8lqk`L}qdg z_fD6>o6L~^oR0M76;6$kVvQAe*TU}r?4p72hN`#t52EmCyW*ncVR*ky6v40g`HQLM0U~aq24l@eT8w=iS<-SK zkpaTrY5i>q|H&5?O><+6_%Z1JW1T%#w=PaZk${{-yq%{3uuny!q)+B`)Kk&FewvZo zrx>^Up^H)usoJ0I(k58U1L)$>)7(G?p(;B|za=`$p!+GA8((NkoW!C&>pO0PWB{lF zi99{x^$Gjp5+EE|e^?`r{Hqq7$<%*s+MIzV1DOyY!Y)ydA4{i}&_vn4uxZv^Bd><~ z3yB+s+fwLh%C}R^gyuLRz z>-F_@Wmj^&L-*x%G}Kh^(T9IbFGtl98t}>gc*fu6Y%`|V=&wIIj&CEF)cm?{@z4%i z`)wxF!+yL~MI%L@*V{1x;pDc*&gj1;1>gPBcfa&M(l6CklV0A~C68=ME%9}IMpTvt z&V5*txPI{CVBT_B^92SFy5b^K<1=R7e^q)c`H6d}_)+dhIhICRrA;QL=MxwbpxMGx z{iCqQzI>y0%>A_rWo~lDSPf2XvHCiG4Y~D#3Sg3m1KtxMV5d9q=%kgqcN@WR_i#nZzIoCzBX)UBb_azwpv;* z`gX%&0z{r+=ywLPQq>n!R~lMj2`NcpCBZbb*msTX zUL1{F79%%iv|&^+a*{Kq33}ZBO()PlkqPv#j(?i0|4pwyZL9lDFNw)TWESa+NoZJ~ zT@7%LdeGuf^bMA5`AU~iiJ&I1lHMtce?8Vaz252dPOtwHy<*~$t^0%{i9TFxUr}_~1Wp!Z(TqYT24?OMnRL+?TQ7 z98;`7BR1HxV8^DRlkE70576*H%K6GimGA)lrH2#w(CJdNTIXE-#gKyZF~zYi99OO= z$NQnjz*iVR`i(^q&V?kF4XRJqwetk$XIBGd)Ib*bgEMpDE!8T`zJHfqzs1+TL#97% zkorwH!Ay}(<@kWo7ELzd+HKN-{Pqpb>>;u18BY@75LFM-cY3|=B>j)xN%}|a_4ny@ zveH;|5^txYVOp-EzE(~bR0J2<_ zANg5Vq^pQaC0INEE!ut4O+uh<7_azNpMn9P^l_A&TFjOLhB*Z*ipp6T%XUdjc2BnE z(AHD*=ZPr?ps%`rSK6^gpyf4QSc_hRkV{lvx%3b{#sm!WF~49N& zlWF#wULUZc23Uo_X3y}GSp0pMNac0MvdEDQnUTgS(kZQFs{Nuh|Mgh!+Us3={YPrA zlU_w*9e(rg&hxS26KtNgpHV951CoJ=Yh9ipHE}A{{@>UKzi^Y%B+)fyc?9-VfH3L| z8CT1RC;ps0ja+@PU>wnPKnnnB z-P^&4GJxYkjQYH3x{7P0$gUgr%SQ7adbjqg03!x~y(vMXPH>z$h_tJR0x604!Y<9W ztZq^+jO9ETQZWw-p!T))MeBzXTbB=^^+jQkTicvxEhTu#5_JEbk{qxR1juhH{ndU4 
z;6GIgF|g4O&$_WRsM^*utA2X!2eXfD4Lqi|6*>wvNY$&$uObE7=8u}pRkPUqll|4p zDf;_c6i9$mlck=Y7|hh1hE1hJ%flwy%}Hj>%J4|JG@g=w0r5}cfOx4BcC34!^F-P4 z!Gs!Y@xKz_+28&i<-PsKz5$TGGKV28B`I-!J^cL={#85(_)lLE z|F^P1&|f<|{XC0NT*ld-9jtdxFP`zz&fEg^rgzM}pC$iL!Wp~vCp zGurPS5F-U#FVU5<`s6go0zsftsD&3iv%YfM>Qj7$JJ_t&?rYB@-5mg>MQw1W0+Bc4 zZ=IR5qWo6=O|x&2yhH<VCMg|D2cnkfq>)T} zD?l)L4)`)$c~T)56I#Fdss&CTBxW=)M9q~K%{qb)y>&S++5rBgslk|`bPwnT8~_ay zm$CQ!U}!)%F+LwiBdiB}7c(Hd^l5Qz+$ZulVa&UqSlyJ-GLw@Dh7rM|SZ)+zl462< zLpcvsym?IEfo9fLr}AkpP;C@n0dsgZF-eXSO`#QzNP3pCK93y-rU*suG!~u zTNEXDi_hxS{44f1Vm^BTK|J2W#1uo>KiKi+b&>XYltE-w7qFeC)qX$(2Ktjfp3AM8x_XZ23#gu4me z6fQZMJIZ=o0}Slh6n$6X)nM91oJ17%NS9@>=JRP-Lu%loOk5Qj)mz$p7&jI8uBg9P z8-*Lk#NK0WIO~M5xeYQpLE|yc9dAb+EfyAx>hC=jFDt@YR^ciL9NN=6oxrFOW%f{q z_0cnd!CcX%&eD!aXU%>GZ*a_$0;&(wfsT(mWw4F`vd{F6x%DEynd=ZJkzeMiut;ecu)DUxeRLrf7S*JmIanzQJK zUS?f!_dPq%?DO+iq`4$~^E=6nREaH@3`sbl&KvT2O4vL%1vUFL%Jy*@-K|Dv+X3Hu z=j?00Xi+GF)l0$konEUAM^&|48QPC%CweT9Kmc@t0o4r6FpSj+8(g^(?Yx9uB0Ep7 zbs8bGdF{Y?!ra$@pP9&G=&PeOD&6D{tRN<1$S)cijO-TMq6>jI!@2)^G|H zbRY^;WZd5+gBDnXA$_^9d6>$nW#yzy;-!_IQe@1{?#*++)uGh%w;^f^-3ufU#VEN! zDDddBSf=~wDw!^aCF$uZC^`nCrNx30FAc<*EdN??pwDn_JYxLMq2F%U$swl6R<+_dJ(6Br^N zBWRmC%l9|IpY9CO zr}3I{8xpRIS?0@`?-#$wHo#-GLFU!w6O%yFKi(++#kmoy_H9WSNqTiwXug9T-iYHa z^kWCY;vhYiH^hP}F3rpIH(&=JVfmnspH#=4A$bXk$PdYgONVIO+OlWMA+F%3!!m_a zAOrJN;i1!Rj6)j+MAF%s*r08k5Q4nHFKRxR4R{=fL2Cqm1C7`%n?P^+{d~LhdFul$i zqb?yl08J7*9L$zAsx5Gy$L9;-{GsSbLYeXQP5V#meeq~`!qon5^P!+*q!E->M%Td@ zxHnW~cO^>3e$>$Zkm!s3LA65{d=37Re571^YKP*p_fs?J%QdXe)%;ob@Pr*?0Y!ZT z_EBGT&wS-sVEEu&mb`QqiN&+xCp;j(KC;yMAIn1sN+oDSVr60odFd&9`Q zl@8}2I)ZSHeOPOj+E|{`Q=i?5VSjesvY@NYmv?CdgcJPRI`U0^|Ec2Sp9)7N7_eJl z(%*I{VFLTvgArLIK>ds}9H~i7fKO_E$@U+0`;f^>X)V7}2fy!24)3Uzfl3El7 zp+;#ae;dM1=S_r1QHJ7BGyP++c{_VCw~E!ZWwVjTl3ZaW5qeJ$j0 zj24P(I9aKfIQe~U48f%@dl7+P8axz7h*f+&qY|Iaj>~%%Hl{2dR6~eiUpK`9gQP8^ zl?&j8Q>Qi}3TRnLr8iRKfb1%^=@XrsYIAu2A6U$`@_+OvLez`Ge}|=T~=&CyBtflx%|Yu`-hyl2amAMG(0ZoAybagNb^X? 
z2VI=An{41ujN@3qz?I^D*z3(D2Ym|LFVavXZozoTQ8!}^a^fz9f+jQ+7nP{5Hn7L$ z^pt$kryGSm5shg~<9-yD)OiBms=t&2tq+w|s1$)N*;8>|75y|&YD5ag4^9>VQi8*1 zGQlUYUu@<6qN);6$04jN316vc%CAZBvjEBMTNlG>M_MPm>cwc}Y!=GJ&Pa!}W(V#o zDin5S`pW5#pm7a)Bw*)to}n(o&P3!zM3?=Lr(PVM@Qmw}6(k^H*dJo-8%2>w87ze> zIj2isuqip)10VK0)WibfK$(WVJtM1o0R8-#(bL%r@7jlR zMb#w6rivmAjg_jbfS@gOv%*KYh*XdI0u&)pVq2P85??lhHQjdyw~SzgtzJtEtRez- z!VToq}tP_w_a2t>mXfOV)ApB5o1cAlnc z`pq3-Y{YgVH$D1aKE{_Tr&2nNxFP*=m=w-&ak4J?JO*Fe3mh&Z$vjxe>zht($4nFAuiNYIMI>BhpdKhZxJM&Rr6BUV7~_RM`f({f-f*R%yvzs~lf`&7%pnj2J~W%NkjEygIS4lNKSj zmhways?#pzCWh1MmkY@=)(Hr9%^$WbF#&zig&As<0`bV>2axa(RoS~oQ~53d>>L9A zw35%6;%Xa4U_Yk>_w0S_QiIZyfF|Q#)S(gjAH~g-j^eM*^3aDWto0q*JKW>znS|<~iOnvF}UrR?u6rky#D|ij@uYH`6ZDNW{%r1un z?KL&Oy5C-riq$u3oBT1tTlRgeeosu9ayg*kwF*E)x0o2tAr@%KhjkokkDiMy+YuS9 zkab%=bTsChVY!H<#_xlcNLXwgHieZlEwizW>PD3QO$^fU9-3ay7p1|iMwKMk9JhXY zqMkX^m8W)tb|HWQHdOgxYcM3=JwYTTzI|k>R(a2mFJRGUJlPI zmDa{1Kzdxgs5PB!I#;+5b znFN+=<|0A+V;wnH6YMljbU<+g!ySaM0mvmOHrYw4o6gtCi?|QB<E&(j$cRXttGyY4ebi_M*gu(4ZBc_NLlQNT0Tcr$bLc z`j=CK=$!FW;ONdB33GCJII?=7RSp!JfQ9FfzJu_AJ`a9nE8Hg4o|)GA)G3rJ6b;PNMP(C?tN<)U-wF4} zle*1BYElsfCQ-1~0Q>?<5}b+CVvwQ=O&QmW%7dJdXox)Q`p8jQei6TI4N`dS&(;nc zifYk>F}e0yt+i=Pl_8u{guIDV-Ja<&=C-RuHl{;s(CDKzZPI4ssJ9 zCaxY(6H^~j;G|Rt;ZdZaN?{zF(I6-9=!}`_XX~n|j7@T$7Oxm#>bMIic|mxfxbpaw zG+F6;SxW8QWEU4;X*nQA2UNLE**ETK*riKR54pe zs+}wPh?rhhFWil1wuZr{Bdgfr#Zdxs!OYM_@Ww&=kzccy=PC=WMj5m5yT6PJh9kpS zQ{D!{zZR)P*74n<`Lzm@QSMQ0G`PCBuR}9YiCXXQp3>N)%t;czMn32uP{NQm8Fr%HMM^d_-Y*_EKmReR0H0g*)5a7gJxWRf!78kKkHf`=C|EqizydG9c zoS7Y#iE*i}1WNQyMy}KjQ(GWM-cfi{0;|te;pqv)Ys}=*{)5v)Y)Yp>{L-KXy21v7 zOKgY5L=vY{KHccmSS+tXMaPr8<)f)U&#m1DmS}XiNljHnJ1sbh@Lr6nB%`6nZ&1lX zhPxG6I<#2fllH(fR_L>*@ZyHw!{#iZpOE)5k;mnx(bEo2I})BtgITF1a=jnOymBnR zM$l8H(HkPc0l=;gkp~C`6L@}7nVMK2%+q=#I?Nlyo4(iym1GGVmNKN(XTtP(k0RW1 zNPRLqx_|t7#%S2O((6-5Yk$`=VZu>B9nhR`gZg}`jh2z= zZ5Nv`QObeRWpPc^Ejnl^d;K$mZmx2jm%65dna{?&g`OmTcDFn0j0Rjs_v9^8eZ{p% zwmI5)pKyYKHB>C8T?6QMpq(8d*N^0Z)-TS%x?qX;eA$gWz3{m-l^2kclk?7v!$JJykG;!^ 
z(CTEm>YYqs^YM@&zhS3CjE((_(DB4AjQ&zYm%0KmL@;S8-8BPai|Rp5wuo8L#RakP z@VQUK_95LD6ZEVg7lDTz^cRCgswskaQY&8ODpYpDj&H-M&-`bg-C>dP+EW`os8Rcx z#=)GXVZ;m`-1Zc8=z3|cwv}&cgOxRQ60NI4ibE;#y(|YllavM@E;M=eoYSI87eB~j zo!gPe1R@vE!LU%1<}P)++%=N8MvrN3)iKDwY=E45D_Ev`P_bR9Vz5VwL9nkNYjNk38XzK5NZ}-0a_OA|qo!;(wi@&eGl{Wcb D;Sm$_ diff --git a/test_fixtures/masp_proofs/ED30921582F7DCEA42D960F73DE905E9DAFDAC88A291E7F1756931C8A85441E6.bin b/test_fixtures/masp_proofs/ED30921582F7DCEA42D960F73DE905E9DAFDAC88A291E7F1756931C8A85441E6.bin deleted file mode 100644 index 259d8ed81127715adf2313a84924aaf5cf90c130..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19839 zcmeI4Wo%u|mZr^g%#86d#>~vj%*-(}#*R5=X0~I-n3D8mEwbm|B5D*ZIznj)4k@mIdH*)xme?jVCx-IQG>g|M&{C!%KavknI&!DM8C$qG^(FUR5}-Xuw&4yYwJ{N;u6G>2!cn(RwVMNoW3v}) z-Ir^*O7Doka*W4DNN*9w8vxeK4_6%7ubkCuON#&?D-g_B%-Dnu82W0$-TCvjE@*c< z3qN&%hB84swwJz#jMGY!G03lh85ngZN7D*ESEnysEXJyBQS7t@Mn-B#8nLH4X7{FR|d^ z-cNKaXa-n7gF)FQpJV6%rJ-9SMy1?&KsQc|<8ZW1;;+Zgldfr8?< zvx)N}M?GS&+$#mR3A3_0>2<8vf&itYMEjNw8(=;07Ts1(sYv>^GfL~tL;E9Kn4gt{ z$W>_U6N9UW3LbDVylqCrUhEuGW{0i4d4Bqvu-WuOsTZe-T<~!B=?El{)(xgEEv!+r z=a$eoNOB-wHGs-a4g#)1HYHHSn4MG;80t)7PHhvd_Hv;8*jIFq#JW+$VXG?VsfKG* z%-Wl03Y6?wJik8bD#!)(+ViN}?xs7YxiF$Y^&!_lk}s)LBLUhjm~~{BE_>N|wyV1SaB-F^L)+r7^pY_ zy=zOxGZR>LMbGyt(ExH;_FD={9uBwr;p~*z6@3OQ+jYwt;`CwW;No?6H6Q>bV!cb# zWng73PF0d2TxK@JEK}r;Di)xV;Vs${qiqd=S!W@;kF932s*e@z4P|l<^`=$)Brp(z zxBlo5+_Sq5K$__-En6i#`3L7ga>CJKfSbRP9f{RF3+p)W1kFZ<8OT>e;{0YVXBkyL zn*zm56q%DWQk(R$wqeeg$dq!us|s|FV!=!_MR_t+|A4#)2&aMTRQAEF93&f`J>9Qy zM<)gPT2|+Eg^J2ys_Xn5|G;(rlgcoFObNzUDZ?*faWoh3^(ByV zB`D&GSzbsT6?gstp<&pb7ARvnUQ+KbeBME4G_}kHo*(f%ZhlTg&ExjHdW%M@*$Aku zj;_K3RvJfT3f!WL!4ZDZ25(K<(y#`hHBPi=GJ^(k<>Q){_SbKb8Cv7ZJxhUXM&v*2 z>zU?=BY27rQlTRO*>F?a>}c$py=+9Qd9 z6Gf4b=TSJvF4WD0+n#}xLwlp#y1d;Oq`;pZd>r;0(14tTnh1s}G)9b7uB;+>;9Wm! 
zsG^YiB{p#Q2e4$#^?Va<4=Xf~y+?BU7X(R+6vA70XXTD0}b_yw)_n-LU~$oTlDlGL#Gx?e<+gZ!cJ z9}54W@c+LQR-j%Dozo+}P;%*LR?_&%l7lL^E|Kz(LO9Sw1olLN4n*u>=v=9m4i!CY zva%qhB2!&%J>B?}l?zrXdK$pJtPl8)1pf8@KZwHLcU~`ZdsvxxzwePqf((q9_4>k= zk(%XvGt*Gdnhy5|5$h%SHKl)=!cn0bYb(X+7~)IklgPE+7`AOzH@LDR0rQ?#tHohm zT^eP0Ox?p;#HU2Tj&AH0T(pZ=B6!6qR7wJ_T(2NP;D0Fmhr)j-{6Ch$XqksE+-0&# zRY28O`y(|j~CCjZo&#g+alcf%;u0wop2G^Wp`}_OeD_5qespH}u)M${_i%%Nu6=yBu zCIlZ}Q25gRs|dWwUmYFYh=ROWWwR8Mfd=?AspYEORlx*tV%{uJ;w9O-6PK13h8AWKY*+On;}u|ja4>&=BKsw6hx zdgJo*TE1eo<^A_c82<=TkN5GSyxP`jTKEb6XtFkRpvyhO3wJnB-ZHxjp}6%J39RJl z>#EfkjLa%&*wdC57`9(%`pdxoj!eHikAJr*`$ey_7Ug;AqtG7P2;!&N*^7yWUQ%At z+E3bb^jQ0P@*hio(aVY$G{70P(4V)-MJH$6q``3T^ zbHe@OXZ+)5{7?Oi>T@&W_%Z=4jcJB?@ypw}9t!KVgs8Ol_KyBZT~_6Uz>bXgtbSJ8 zGviPOD-(>T5sfogDTGTCD*u%Ta1`*IYFraOz7Qw#lJH$U;2VfEeF89VxpMS|%YI&t+8vh~CJ?in6ds%z7^bF)~r; z^NP_l{7BW}>K;*X$x^BO2PXd7SN)yS?!UUPva$r>wE(=N!PkD6M}=2ei1~C6fd~G{ zrb(Afn?FSKYV!HdzUof|VpRIuinF7wwK*y%exXsucBa&F!4a;-BM=tS1zAEom{tfpmxg6XzqtX+FNl!(Y(d z@V$!Ah57BrrlAQsV~u=E*XhwG0OByrL`Vl?7#M7X2Cf?LIiQD&W#JdGeQf56Le5H? zGY0y?bcbT{?}^Mh1b}L-DSdR^@qT4F_nEjuGAjfU^DzTe#vWlXAz)x)CUjM*C^mb! z$UCSAdxoDY*JmJ@1KX*AKy0KTWb{enla^qRd*9IP7RNDgwoXI!^8g)7>+)FXso!M! 
zyP)oWl}w0WIC|EOuW?-Guaw0(;gRl5%ei7UA>UN9>3jL$)8Kt^q5d)CpA+Ao6W^Z` z-+y-E`+tE^N#-P7WcQ zvkl5$=R}SW$ZOI+rneIcL9)zI3Ib8_xthqkG#)pj7|e0P0{HYzroMlhI39;LR!JHR ztK^O0=5pCP|Lm|>2&7WD{7Kg!=7{C<1v(@yw#%7Y6~ z*WPJ$mtoFUzHvBta>XsfTu}I00LySO|85+xNIAW&$#CJkMF{my*v&)_`Z#KhgCwy{ zDl*DZ&(xjuyMLeHpWUW#q z`+J4|X;G_z8nsef&?95yt)6nM=D#>8Yg2{;eA%JBSM9c`flq%#{d=kZHOv1_QF9(3 zWIc5azKPyK5$%wg9uw#Fz#+8u?KH1G-|#}@{O{zQe_GUzKL9d6Q`U|PvW<*USar)2 zED#NmD;B|%kK?Ae{F_kzi27es%)e*--zn-=fl|E;4@A5~pMvII3pLs!mn)eM@D`dW zrD3D%8T(1U$NZ;7{ddW;e?$1L+iMS?yl(fipAyIXEfbo z0TGd7YRsm96_MpE{cae8eYF@87~R@aV8SL}ik5Tj@7-1enH3;*QmGcKVhDp=#@dO`ATK$yIg=RqDP zFA{CSj|@M7>~!jwhV>aSH)zg-{KD)XX`!Iv2QL`h>$c*v8qk8SE6&-z_p_m{ju30nw_~H z;QvGho$1Vs;sHYETmTmCA=RnwWVSE947p75bkSHzw%zW$JMiobq;et3uOmT-8(4>K zcUQJyPP(TVld+%e|!Wslp8oYKL7FW?j($#Z-&yXsOzOB|!w|FQdr@TSAh zKB*z26HhYnEe)&d`x^n~h-e|1b{9My3>cyioQFv*uOzSFJIPXfo{~X})p84Og(0-A zXCpsTcmF9xGE}h-tKEce6M_80RQ_9dZfKyQH?9m0y{#aX7twtrpf8jylFTn@qH+?O zfr^^A$3oU_E!#6(Px&ZpB$PeWc#JldsSU0qcw5Si5lSzS3JM2NZ(PJ&EbYAHp?+u= zjH=igr=-xZsLJZ=Cd4+U>2M!RK!*1Dj&k6P8D)2rChOc6{jsEj?9nMVb(A@BY2)UX z0m&ORWb>xTr5Je3oW?pjo)v0HyZp#MHDQ^)Z3;v?nhC=QG(hCPeoBe^2_qWCLOB|) zQ0nho(uiedrP>HcLx$J9RU7Z?4nZ*hVdbBVi0z{AG!C&a6r0AddJ_J?1 zq>!~I=)b|l%c8z3`b2`CJ2JBhdL>fsYV2~`Htv!i>zpE>L2`I7GRs-JWi+$yIEc_) zfwKqfElEwA8UR7V!Z(UIRnGp&B%u0kmIYzp{kRBjBJxqmh zNTd|nm#rca;3>|Ip@+;9h>OutmJ{_A@>+Ou%h+Y>P-37#OH!pXG1ALZOO zQw@`F+vDZrzMi*Q$HLYP@7QkhejrP>MTHz#@b|n#?ymb{7I9`vB~qDpiagQ}LqEo7 zl#dlOqKPjufDKZAMKBl=+6xW7`-Ei>q^+@wgm5u}nsFVF(Wng2Pc$8-D4tu@0Wt}% zdW`})&**?BU6D;v0|Njk%EV_qr)0OyNGdhW;ac)(l9}kSDZA{tUHwAGgPO~JrDt34 z_j-#UQ)v_JOnp2%{QZZb9ibsOK4mvFYs6`SIpJDQMN$Qk1z;4s1$oOr49Y|eV%xCT zTD-6x4BeAIuvyiPILV$}?JJ>1S$xkDe6fQav97F_Q6%afVn`?d!*Z+v9_gZMyWi~8 z99^2BMOnP9%6TzE`x` zS&D`RwLdfJ$5Xj@D)q7+u5xN#-98UQL&axcJJ>M%@O||-DUx(HcnYK2k@Bj>dHs`<%k%gGm&lHzJ@T2N6xljay<>csP;x8 z=P`W8#Y2;cthLvJWG)ue{T_PyzHI%$)o2xes1fe-IJiQs32t$?MtG>dHBLGxCpC+b zage?G+lfMER9)bQtJFO6#oQ;9%y9E7VOB0k+z>3S9A&K!PJ@^mw zf!w<hUZrHff*%+kR3-PgrBu0^lZ 
zN$lKQ7HNSpixqk#=qROTTa6@KI*7}r2s3!+nbWxCAvchYuj z&-V<{@5|wCdb@+B%LzZQ%cc_PJ%p+km(dNSXTpP_{NS!qy{zxE#+PK7tZijkn|GsX zzRXp&iVsvS+`J{u>GlNB)f=AlHQJ1i3DC8>~LA!CN2cdvt{Rzpa zPzQZK?P7U$>vg~Ho`UFTns9Sm#dXUW8F1! zmD&}8X>0LNW9+OTOo1_-)R#%RCAQKE!TE3+_I7ajgK@De@0k&#dB@DVQddI@6MwCz zi*Cu^XpdeL*)J)DYR69B_Zvj%Zk5PaTs6|sW1IMB20AF!`^9<{7B>~x_%7_|V&8#W z%ex$`YmyAAm{HEF*G(o|8tO#i1HsQVw=k^U0POPy7vOk5 zK9`Yg#i>`+*w7deU!s0rSxS@EArQ5n9a+qboB(lOOJ}ejpFU}6I&SmA9OeHpdGEz5 zJCUBN$@E1DXQ20#o^w^kB`CT`vqyvG4oVUPV#Y8bCtHz~f{L=zPOrcQJUq*%?T}h^ zd+lc#^AE4r%p%{(6K5;E^2;vl`TpGbZ>m^{xFuRM3g1ktteD2vVP__mJ6pF>I&0h< z3j~%;BIgOle3zD$cA@X4X!fEDD3cup6`4*_s@@mkqmPsup_#*Zb`H~ypygMF!VDEG z!W?QJ3>9E*%G8f!&#DK=$E7 z2Y16Xc%za=4^s+G153BoOV2U>h076^%?&g1$OuBsUgUxe5Ybk29FB*toNt?kG%mWx zgcACRK>b6Xvk_?8hC3t$4qq6_1rH$ZKrv_e%hH+LJz<+gL%0Ob4ac&V~-&iAUqJN%YPVsLDq)I-Ho7+CcUts&1G9e0iGU1?^v>c zM<>M>6f&mIghQ*b`mE-~DS1E2X&Z)`1=GjOziB$;K{oE4G5{w>$a)QNkbY`^hM>KA zB0a=CeA-`Vm&76H9&&S`4USgZxX4L#U3|)?%L@KRgyK!^g%L#r98PcG2q}>BT5dbF zAzoW)R%DNl)AX2NSjIHPVvW&T(N?XY1g<)j?2&TN-9}U>x?aT*Jr3@`nW_*%}hIGxg^|WoF_v!2mZ9KIL^@cAl>f0 zq*&WQE##DcLRONR9O;4~BX~NwB=%l3KjfpYm_Vf&qOQ=A=DYpFzOBU$2g5d-XU52% z#~SP!iLqiUDAl5rk{xIv#KzCh>e#$;z-g{Jwx%JdKE^I}%^pI@i4i%Bx-uZ1 z+nK*lEoU4Y3SQ`OR>bhl-@N0Yq+*f`y@A^xoVP&gRjkNCga?ZHeSItrB@G$6!)vT1 zT@~vSr0+{bu!+F1Q*tMYe!4fs>^jpDqtuAJLy6MY?dzgJ>^vg`XPsAY)3x!EsCr{E zn3yX?$>b5FICoN3{jlqL=+y%*1YO6&$+N9~i!a+_p?0X*g>++Npa!bK!Ye|mm?` zCB^pn!odE}L_Ei3`4i~ANF?w`b1qSdJ}&j_bZ;)np>O)p`sS(dl*$xQ zYq-2I;ac%)&C2E?j4sWM2I_?Km94-o=8b5zy_ci0yaqjluY+Vg*B7bLI#eR=MVQg> zc^FG((mT-X!?zRh7dWO*1R~!Rg1=Q-rd3#qQd0q6qt1sQH$ImxCh|F*gS>IUYJ4k{ zZ|R)exR@BTLw_{Xpg86wTGZdw|CwB9QqZVs@a7w^(qg@bg%=H2t#joK%)omQfb*NP zqM1rm4Wdma!ely>Ujb|V@x3Qqmc+sMO|p)4XAOc7EtDJAxPgo>Kax4(3N9aF>-8n_J6UPa)>ZX3b}7>5;j9^dVRL_b=93H zr1v|@v$-grtH_+j3r~;sOk!L@`Rd-uBj~2l0NL&!1idQV_VE+b644CklKxA?1Q?*s zk1PaCN|*ckoqC^;cQ9vcR~1iij)WkPpLqInu2RaXn!>rENHyPm+lb^0z9Pld6ho2R z#n}}h#c7s`yMMc1TvV3+2 z$yMby_;<{EFQEtfW{1JLohGv8RJdA}+io19bg8Do6dZrjwT1`1bH+r8X@%G=IQ*xnUV 
z516Jny_7)cKIUlNgY}1OI>n3dt<-Qb`ia4InQPnmHVaH362J)S&{|b4Mn1;E29E&1 zmS}RFE23)hvRNLpKC9A~*6udnk<^GEiJQ4>kwgGaeI5O|KOdv9@*yiUJy`lb*d~T` zRttyEkxhye9nIr0RzC~3( zyQzEcj5vfBpDMrv5Fvp0=qz5A$yEiNxt2@KvS^ssX8Zt6iKN{q4@dVj9^MSyTRnbt zpF-ckZF`@a>P1K={}QG=7pQ^C$Q20fx!yaTjP2M!i0xp%AQ(|{XMX!dV7$Yhd#hgF zJVvXY*u?0G4`OfV11YsPw1OU`pnk9u;|XHKNW_CIBAS$~Cqp-Elawj$U0BZf(Cbt8JiUPYWU*>opN67YZPC9Vq}8FPI!a}?a#Db6VS@&( zdjk?M46Hh?d2MZCe$bLOc2~1`?*2IJMX}dDV_1QCwN}V=thK`%P@ib746$sn_U~bU#srOtj9X z&rD1PbPS5>W9)rm!bfJeBnDLE+U+YRT0b$v*lG9^dW4$HN45bbxfogkE)|hm({D_# z)L@m>qb#ejSm?Z&H}Eb5knXu?XXQ`;vFI5OE1c94jm*QI+M*8nD9D9gJkD@R^u+oszlwcU@oqX&65a3qr|&G(5A zoDgE+7|{y$5|Rysha&Ofw9DMZHmdP$M3^^)5-eCvTD9k=Kb=Z#_O|x(;(cDbdL*AT z7~n}Sqotj2S)i8uXibYC=e8YIm&G3;OmRk1kF!vceAtXw&-PMiO>+a|odi~e&2m#B_!15mUQ-Bd*gcb zxy+XQC(|lC0kkD*S=2>OL-$&%D*r{}i@d}uvS^cpAn^YD`ykC*%y)76<)|fh7ML?S zai?BTE@wN;c%HSJMop8=eypoxXs_2+Ne4(?PwW;+WSu&14y6w@JIs*j)}wLE0{bC! z;Px@w3!T?h2otj(FR)@d1JkW*Z}6eG5p(lDM%6Vt=lJ#b99g!|e3pBq?wXfDR;;sx zFHrc|T@x?q4Ux@$Bd%QPF-yO|2QW>~G;7>u0mqyZRs&^(#pdzGp(#}z<=tJguLrmpxiqPm@HO!AWZbM=lNh@NJnL>r!0n?>@EJoh z_ZuK190$hYIkHNHn-JsCUkUZPZdvxYCN%5kSETqC24CLWefm*3)~#m`TW^I2&|fj! 
z-?y9nimv^#f86x-SNZF+;eW(-}bxi_iui;{ngJe`<>nMzW^CM BMP~p2 diff --git a/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin b/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin deleted file mode 100644 index 9fa6ccfe2b5ff834dec8619ce7ce41edf11e8329..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7448 zcmeHMcTf~rw;!S)L6E2<8OeD>ktksZG9U;@907@fB*|HU0R{m{k~28uI7k|@gki{t zfXIk|NK#-x!jSoN-@g6oRatM{+CO(s)#*NcZ~yMCbNY9?&+S71000=iEAe-I-EH_q zMf`Axi#mRi)Xvh&yHzteR2MwfZ3fj+0%+O+04vXkul7dP-mTN;hx>K=DCR>cJ0%`m z*B?Kid*69z^30B2LCL*WXufcH#w>u(-Ymu>thr-oE8OY*_u+q&pJ8`%=NS+q;sZ=( zUyCwf(XXBRQzvy=9P!oF@tWtYx3ByT1^f(KH}f&{Rh*hd(4|Lw+fsR7vP^M7Tg-}| zYABgk>lv|q{~z|ZK7#MZANuFcGkYCLN5$BQMbGZ_gW(7(jq#JP#Tne4Rx)y0=)y<`tE+_lFDr z8vn=$`~}kf>hk{Eq^0$r|9CHp4Gipd<)Fpo_R#qfUDOUczS(PW)^~YWY~Wwg{v)aS zU#I^9X@B**^0!G_{*)@a5;5L>PqGodaEEgL$#8{!7~bKEHpY z^%qF{s|n5DCT(-WIE8=XXI6dEHzbCEh9^vm1+uowyCGLu{H1W7(0bm~5dH zF^v!8q0A2X*K*3>-FVBnbS7=A8(b#3eNk6F+BkxCME3otE1N1-{L8)jIn2}w%Z-(9!j~)>R(bTL8$*L*D>)%LsM(e1!95s62$l=&ZmnL4$#q5VFFW42#~gF!VX;m%0vd&P0a#@)NT z7z3EVG|gCScRrIM)Q>EP+1DLj6&Nxx@Z}03#rwo-lXH@O?(p|W+&ecxhCIcG7hy=*%ujT4q+B+WYWQi;KG z79Qqla&|MPwpaUw9**(;7G}FsyqS#m#x)QD+bw_YR5kX(o@uCZM8C})$|Lrw99GA> ziU^S6RvYPAhO9mYP$gC43# z%W5k!hDAPj!Rtz|XHk6?tm-1{8o9u8m)c7OuZKKIvjaN*<7 zr6)*jQmur2QTMh6w-VT&!{{Lk+W9i6f}ROzUj9J17{-n{3EJI)5* z>&%%&Z7R)?gHRxFq9f!Bp~T!xn11#aw={y4-I!%a)$2AkQsmX^ey{sz8a*GhiD%d%IXtryk+3bvC32pUXQABaq7^ zw^C8a#bzbBMXdca3`CaRnmdt1qmJ zQ6B^$+V^OAy1mm=+DHkFVq^%n`)Vhuy}q%?wRb`iji2ASdN@oWO3kEeXix}*gygwP zJcT*48gYh;Fl3Y`uTGX9>hM(QI|;^{mT;8g0vft{d2EZep6nNePL~Y>L!WWl@0e&B zO>_A8%7Hc6Jk{S;)O{K7ZIkvQA7f4LxW;~y!(9HUx7VPtGo`tGLIwvBDWTQ--u#4D zBrb6|&icmr+q-D`BZQMs1>MS&|WC zD>!yKe)sAd(5tED^Yob${Kke=8Mih$aE)Z?<}URvN2S-NFjwdt(mm(Z#!_ncgt`ob z32iu++7FE|^;Uu`1K3Y0t`<=HJKb;9=NSihMuWy%zL*gQj(r@`!OEp4ut;50ipiHk zS(1LdhM9VUudw6k-yGQ^)_u!fo+O2>DhcVLHkKpN3`viefexUG2F{c@&ToDuRAC#v 
zba7K<$4UcVqYw0RSxmiDW{=8S4`Hv^(F}7cs=Q204nAW~YzAf}(>FIQM>XMXu;eq~ z)l2c}QMC&TTmu<6Tp)7!zT^V2C`AJ=&E1}qk<}efyYkrHYB&nLwJ{+b&R1n)gvjVh z$1m82$u}Dy-h9>Th13YP7fnoubhDs$cYw1hTpX_i;|`2H+bK`{qBO*o?WdmGBWGje zacKm$9BTnxm3BN~)pQ1cPpV`@h|*z(l;Weo*+E4=@?>W~=_Brqu&rD4AfVjggZL4l85Zyptz4Ri*Bc9i>5r-bWaIc6dj%L=={0 zgfhc=xRVB3XuuqMB+Uxeo7R^OX5=?&WCW70+6=@FWYa*^wzb7t5qgK>Qk>_Tonss$vL^P;vgw2?I{^(T znX0*oPzWyW!6=He|3lb?a&h;sADhLdr8Ao99+hg@2)`9jeo1eCNB%<=40N!~MF-}O zsfobPw~$Xdh$w`;?+TXUjapY2iX_?3N<0s;tm|B2HtKhSxvH~H--aFczooVla+bN1 zq)QU*Q&Q<9GA#?MG`KEKP(xC|Zsia1`P9XqtmE;BmJF-vD+}s%g)k{ z$#^&+p=G76m>J#GAe5Q~4V2;?5E;RPR(JH0BbF?)G$*LrR%vj7^T&P7ci>jZ{j$QO z_C28!lp&7FG2PDW#^Li4kn`$fVbG0>z#cHD>>l&h*AqB9-4n^+X&(rCopPb}T;@7`x+p{Q*)<3Q4?ZFElxO5m; zSKqhz_DM5Uz4-OyEdlV-5?6Hd$Wn>9`lE4_=QiyN#*^7R{S2{~yO9}!$ zpWAm}4jt8ll!w=lc$q#v4p}6A{W4(00147s{kWeGeqCsyQ6W)!BHn9=K{zGvzE$ao zg%jBmHKyt;i;Fu|3s7IJ60vq1E?{XJ_sJ@!d*J)ynQ;NhWT8(cw?4TjxS?(}PosuBs7>ph1K zdIT8FKegfNw(l}VY0<_2M(*c6f3)uHD3R3Q)GK_ys`6${8Ylgs4l-52Qmt=ZWm87Y zpM5<`Z4W@8Dl@XeNUv})j2=D3o9r!Jf4Q!<*0|893n>o@Cqfw0tR*uXkY;T-T&;vC zKTq=5m&}gMbW@Mf-bgL1oB|W=lJFTT0^JS7J=yhr>oZaLAPt+hErsT-8rMRXI~K=Q zpF|WJhG!Zs`7dL3r4(;)+qv8!1SiNiI0az>^>4~Aalq4q-gEDbVj}3ChQ}_gWsPr( z>NIEppse$2Sbh-wd}o^g-IucG#FKl~r+bKid6#Y2p-PbV+}^y2chNV*6LdIL;s8_t z)v+Pyon+G^<}Ah+J^Y!2`r>H0H0MJ*q^6pDAO_9oB9xgZlyYPccfos7CtpZH-bl;p z6Qp*f6l{|EM!|k##+s78OOAz1(w#Fw2fWMxp;|G{-Ot)>>XafFE$M%bi?k!p)+$=a zo4M#6ew!v6*s01mF|(GDHSUU2us$NKzN0OA7@L;r!``&@5@kto7+GCL(as{mHm}uB zs_14CiC05%l;1j4X3a$SQiOa^B>3}9)c-9tMAEv$BQ$|gufIyqX>j&C5H1;6PMo2O zB3c>sIZsiqtKFvXbn*4Q9cv4P+@I2J#_6@(L(q~hL(6gyDm1xrxPNw@UL+T&1z zIU~bTykX@5SBbrB3zX?)q%De&m%w%x9=5*NT8sWhi-*nLx;ZXth2S}2qTUS?G{4*9 ziuIOVAFr)zHqAT4q#Zg;Wc zZ#J0^(6mqA!*xv3$*oRUC%6lPSSk?~SF}GvXEToD4TABSLVTd+(%nK_G_UE!tyF&s z{Ktf9Qmtu&0R})tSlTZ0o7B6r=9WvnG%dQRjU|LtkvsDqbV4)Ngw0s4%mpF7@~uG} z3!`K7j;D)1w=hz@sgTu5;u%JK^j@}W>_i4$ye?h*IKrVc+!88;0 z2Em+06kok}&#~nDV+LvML~YnpbJYS%|GG007TGM$@Ycde*sY`&q^{)AG%+s5P!f=4 
zbnNMMt#sgHEp@s4t5IieG6go|V@95l8ncb6xm@)ZRQireVe^~!a>-FC0*WrYI>6DR zYL;BDm!Is{v$=dTSihj^ZjU*nbOswmNVy#}KY0Tk2=rwh7DDOvesg);cPdV^p~J23 z46k8^k}oS`OdqIGB9zLLi`b|V+Dnoo&z7&Nq-yackd=V^BQ(_(h}4**B)P=^J?63o zY~> zs>f}u2ZddFWf!I6Dth^YN_vg_xrPwW^edHc2m&B$nQ4dBE1(n|wI#D4a*eOBz^01l zv_WPc&iarfUEUXDiuMf5Ih3@*@eUlbLk;u- zAgaY>f(noiRR#Kviy{oZ4aV~rRpW8XS2$Fp`_@jg&td7D^Qd#5F{YZLSWAP{LVnELrb#g$3glL=pqlofhAF&2n*>eP+z zs2oDVv$KB|B?Et@3Wjtu&!NyKvC>w~@UzDSdBz_puD^Lr`z;zn?xnxW$)RuolGh26~I73xI*{;bq3?rxs zp|)muA40i2e8&b?$PJpA=}zp2Jxjk;7Wxg~f9=+sBT5=8mp#1Se~&;k084hzv)5*B zM!DBY5;moU>iUWIl%0}C;2AX&nhhZZFh#JDKhemo5 zY`BULPf&lAz*{*Z*CI>Dki}qOF{QE90w2W$xO2?VLp1dH1AX(omJZu=PVqmH7PWK= zCu#6ulZbfhmxw3`ZI^DtLchbLD?`>DCaHZxn(Hy)JAYy6oP~Q<$qYtLNOsf+Xk(oj05^K=RrIa&LGy*9!!E{0Fsswd8>b8U)-NJJaXVR<1l9=em}` zhLEgpEs*9o7M1j))#sdgiebdI!r4}X$mR^^XXW!v;08H|ED&fynW zYRxNg`;%)2AzPOhr8#CDwZ|l*;-SG8{FH=&tUoz$;6DxUtf!@*?DTG)idS%A$1Rqa zHjkBHO2O}?x^y2P6?B_35>3o3c1*O&_e{6gz|EPkS_4SdJlLeq?- z`!KFk&X2L@k^=^{%zh>__(d4o5TJ3TML20b>rI1VsxMAO?g*xr{FVE^`FJWB=a@!I z5r)~!J;Y96eL7>zq<5=GRg}Hv4iijKFwB+PKD5(??HuL|t8|h7<47?6(meh+0z_gq Y6BuD6dz(Z?U#j6PLnD=60F&e#G!Mh-$p8QV delta 1002 zcmVNCGPtQKJShxO6wDY0hWRe}jO?rU zVrw~$xv}JlpjU=@-QH7f=(ldJ+^o?zZyUq77fq5=8IlSb@M;Xj8-f7t ziyx!U1M@3PR4kf?##ywGgaH&Lm%D9Gn5D<1(QWw}Tpcsc@0GLFP|DzK0yIp-gc9xHW#oS(CxvrFtVZh$vgR5;!gjin-AG+J*pv zmG9wb*VTXROxWgz1H+)wIt3--p@h2SR;)*8H%m!5w+E#z)cet?q;{#buquA?w9y)1~Xcs4u@gJa1L%Pw&c z7!h}Y)ehID_XHfkOU*dh5T_&ViZ%4}MQ z)$)Bzbmg83cd{WGw1kW|A?PHke+cU+T0@^<5*9U?V_6~B3r>nXQ3b|CA+TH-Fl#|a z=l~gtt3b47_VQ|XbFV^+^nG3{N2D+JdT)Pa_?|>ZRTIeu=D3>!sLoH9$|i)=29XB` z!|8XO09M*VAJQu+Rfz(u~4FJvHZXJB@imAoQ+*PcOPLt~Z&rHq(*@G{TW)I6>fuap?U;93One0Yt62Lc1Q13v zo)6L|X$vJlp{E``>O6(kbiHBP*%sv~H`Y#PBfeo$Hh8Y^TK=5VDEiC@Lh*W)k-a~z Y!D$ZpN{}RqgSPwJpSjIC3zOs=H1Xx?t^fc4 diff --git a/test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin b/test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin new file mode 100644 index 
0000000000000000000000000000000000000000..b10162bc6b0288fcfa3d1721fd716ec3c2a776b3 GIT binary patch literal 13799 zcmeHuRcu|$vaK;@W?uF(Gcz-@moa8$cI?=WnK@==W@d~jj+rs$n3?hJ{ocvd{kYQp zIOpeT?~+=j*>kE&Jv1~_v%4A`3=9nGkMY~U(N~(1Lu5t3>X(!Ya)gSbw4caDqVOST z_xK?5NYpkJ!TxGHjbsEgFqB`y<_6Y<@zqx?aQu_P9&w~J)FW46~yn5fBH&q zA$a8I$r2z5*PbjY&=5~L>J{exz$BrTdIQfVbGl0t3mqX=0fCUW5wWjeBGK8(xxZ;F zU&l%u8L_8?3_Ynxl;!CDbPid%ucF0A+5QoLJADU+5H?6jz%{7kt08=sTfLoVn3I7L zpOYNh%&xVj04d)rNElPkEl6S>gf#&+{vJye|0VKCACC<4s<6+7lTCN?~B6vHey6(wQU=$vb>o;PI5O*kPp>Hj&UZ_U`QCzV5UABtspsb#4Ny z%YH!O%<`y!K>NJ4p`Jw~M)u8#1noR`5W^D^T`Bm5YuM<06n}zw4AFqZt1%rxamV*W zT-TEA{41x|Nt#I9y^vS{k3Ay-6acUx_BiHzq6^XnwT;I!e34Kdd_YeLw2{b3DXwo zX%eO42blUPcx<#;Lj0TA05Nxp^c|4%HQKD=^=cIZhYAgCSkTE^E|7^xiuLoK@Bu}V zM2eBKs*bEs-OW_K%K81i9-j9Vsjm_fwH-x$ZQuYt)n_zT88ie5^DrOJGAn+*{Uqn2 zHUujeh!6HD=U93GIhc0oFDFJu4o-M4_%M&5&kw#;#cAE zPt5M(>I8tL$j(`DN6B++`5n%#wuPA-F^idpavvU3#n92dlkd;~1`pWA^oSOT{#z20 z5SgJujUZYFMJV`ch16hm6E1RXK)5T=lFlv`^zuvh@mt9~3dhe9ZhH+yZxFt5DaSy8 zIY6d=>HPYzwU-5D9ZCAl{T3#`d zFr;j`M%~h9)2Q}NSh9cgk&~yNqfR-dK%0J2xr&U ztuTUptlxifo7VNK-v(ep z@y+cz0qAG8bnI0Llpb7%DM`jk5j+A^9e_6X>>Lw-V+=caHh^FW(Dlty(K@DSF7*Q& zX>?xl_lD$`^`Dk3Nvx^YyBYw`7oO47}*jkKFfk_MioBn>zn6E zA$dy;(_*3kcqXGU(k5~{>gieDiKpfEX~!IY8DNydX!d8hw}}-A0xX>0XX=8>ho+Hw zDO8p>vK@a_$#RRO?R>L1vm zPe1?{R2jg=K>+PsB|gL=KNX*Y$6!@D~UA@p>WJj?Y$9F^S&bL)h>v zlMsDbAExg1Du~=8lYIM*4lbFztvUzf{X z3jd8&Sa$1r~@9*!6 zrybbCtXgt9Oga={?rmW}V`4n;R=;5vdC9Vp0Wkv8KLPo`2Ys|Z znXMQ$6-vKketyygc&_zULLYEjg+6~SW}6h%NGarId{JZ#wu10{S!?+W2Dm&At`;_W z6O3w*HRHc}M=5YMQ_@snhnFCDLZ!$+VFm1yr>4LZYd#LnJg@A}@F!1R4>M0B8L79Z*9;#|JBP;t-SOSSH`Flt=Hdrc@yFrm#4O zFaeu&lCNuzOE!VKENNk+tmr}D=yc0kB%kiFAZLdmH5K9hETaA}=syPdU;RZ}70FS` zuXOUMjPP*2j(B_=EHNmxHq}LhGXjto-)7tymn@y*GSIt4LKOX#h=EbH(fpZLL%Z+v zBUCLBX^m>$ErA{RfQrny-OK7uh8F^q<)lH-U*&;FXWc3`i8^qjF1TRk_sTqJSDb&l z&>-P`CH!R8Y0eyb_h*)W>~D*}f0EQRC2KRoAR_>W!^z?k*s>Mpdhh376M6f7U#E2Lo&smUel2K#QHnY4_YbLmL;d&k|4CBA zyOK=oe~F1qBbUO~oGVyM1I<@J3Y4HaE+XswvBM_+8{vPO)X|FiTT_u@1xrwxJ#d>z 
zUQ7veTECJdH)$zo#=O&ZHvS>?Z>ayC{y$0T`K4p<3e;4kBlOM;i^LfiA8WG(0>K%} z9^jQxrd#6qZ-oDCQiH&aJE$+1P_YWuPPo?!UtH95X(AE)xL|zO9Jc8IPydkmH`ISm z|CglR{4#S2EXOg!XqVE}tJ5iHo`-i8yuJqmROmYvr`U3_r<2ChFj8HAk2n0wn%d4= z;ZKyyR}W0T&u1i8`A@2`$=Jb51jfI#Xr4&dxR!cxVbO2=RUQcE9|-bisq05YxyB}F z93QI^t&ojStCt{CjuNJM16tAkA@$#r%)eLt=g}@aRS9LW+xy|4bN~MU8KPeqG6Xd=*S)?Kbkmq{z+ z>US6XHOE+~oX2-F`a{G%4B;9_T$LINFQ)j>VEKW91^0zU1xC-qt`q2`8YQ4#=!evZ z(R~x!S3T$xk1x2-V!p=?Ca%cZl1mLIuE0}XX#{S8yx`yGlFQL?)Mk?}eTf*>+J;$Y zYwwy*rlI$1{fq#*JnaI02r;@}%{dJ0Yy0-ivCzB*o&silnNXgO0r00;GFfgt#w zJ0_wl$@dR#*{gN8j;GHCgYR!N&Pg0xDTu&ze6@BHHAImgsyjS)o;m6ohRkR^4!B|x zdf#EgV(4h;$m~zew<;WDq!p&v8ED8YDZBBJjFQ8BtRPf@D0U@A#%i&x9R6&AVzs6I zmZ#9hpVOXre+62I(q%oY`|NbJJK!}--FL@mnlf)W8upa1)Ump#1&=Q1kaU+<&9mSr zq#i&FhTdij(9L>_Sx^oHa21Z`-;DV&1y#r-vl9zCrh z594)jo3zDbJzn;MB~6Gz?5%eR){-GBiwO=vuq@~4NYqlZkH%^V1sZ0)F} zY-Oe`lhaicJ@G&4F(DpP9!R~K&yQ0bed=F~u0!M3KG8eR(7bx}B9z&~N%#~_0%wuy z5sd>KGiIPQV1n_&7uc)iANlxMbtp$1zLEhC#w~S$e^Qrjt<2I#;nCJT=j%k+X3K!R zRqPvcr5W5T{iy3|N3O{NOhXjcQ3pl1cvdT}4Q`EYK|kB{QE`Xw{_I%~@=vmlRcJkA zsLvj((Sfl#Ei(}6<)8GZbOSWf_)(SF1wein>UG+D47*&rEPnZXyqNWx`lu3xE5zP_kd?L&O$Tqx~J71lv6c1|8Rmi?(~OPWF6Votuyf zbb&yfVwRrFvFD|JA2wFGzP;TblY>qe_6iX$7{_fS9C96jS((TXI}6qGtw&HpXl9zQ?P8Hfdzwx#L0b)M8LY5W_rRl?2JZHNxTl2C%J?82N8Tipw2!r?wD{T5-WgyZB^wBWXp%<30tPSBBY$~Pg+s{u7N?R67_w>#zC9(xGHIP5nmF7Qb z#$eRi7ljjdPuttqxj%@jOelm;Djqi!05VLjKn#ASTjtX@3+Q;Ytg>wkHD30@>#3?( z#jU;E9vq0P*IHjb+DE9gU?*G0U2AQO2Vq1Mw+@{?VyncYeQ#^sX^=r*KogbXmF71~ zR0fZTX@|9d{zY+g zEhr{hH#WX82aRhKIw=PV&;2!hbF;o<{yt0EDaES+aSg$(kMU0i&ZZC^6InBn<+6 z>*bTy)Gic{I}xbzkWL7_OL(f5$p1p&EDTZ&Z$~sZ#!PM7Ae}2c*pje!D@d52W5H7S zP;uBukkki96AjKPXA1>)|`Fgj!UfVm7=WuEno&WnE%C{e%S%ByqpgtNH-<(QbY*d-f2i*y!qp~d z7iT+FcqCYpWA_hx3rWpHh?sX~$4R;vwJyf5wjET-J&+}@@jKqDT2quO7Fh@Vpd%?e z!LKD*-OrXZ=65MlcBKm(ek|$>7v+N{W~8=1`?LW4V8XVAs=p514B;&7(@BG4hhiV3 z-x^tQwZY$Tm8doSYT!at|(UcPdCUxfSY}- zoPK#wBzjO?`Gi+N(n!N>Y~m`;(-HlboN$7S3W^KRu$x8~4;yt4fp6z&JcLcvWP$Fb zL+iLB-&Do~N6__b$E?V_^|XP42n*f3JHeDm>Gu(bkMGJhQWOPzy#m7skAaAdb%p~b 
z(nE5N0z)iICG?c)zD@Y2r@1CN4u0nXqHHiiXTziIal`gBCnL3mjR_vGVNxJEziuc^ zq*Jy<^DVN{s8i>*Z$HnBAxJNnoku&YahR_O6f(EU#{D?Z=cBpmGO0mgdwR06(py5p zB4+Cdz2nGp4|_i3RP?VHgRi0wJkTTf|ZxZ`2xXg_0dQd9XhHQD7#N zm$Y4mFRy$d%!_k03EjKcBo=Y1YTsZ#2-<{h;`L z!hE~oE&NSfT3_gmn;Y+KTUU^a+)nHuR3dp$Q*0!nP0DbtMn=g>hp5V*+2o)FI0i`j zere4@7xVN?S-;Ju)?|Ev7VL9>@d4#Jpno6^z4aZvm;1z)QffJjRy5Gj922s>J3n1S zj>oNU=X(tt9%T7-Z_r|Q#0hCW8^*7k1stsNwNIw_ar&J38)F^!S{C20lGODETHqS2 z0Gm3}KYid`XU+Y7aGrU4#P?tA1rb%OJimH81#mIf7I2(yP(OnY;>P+m(xa>(*M*b! z$iA^HVAYvp4b5v|%Vi%Hx*lSUb0T`c7ful$(0FPrv%>U0jnmhFAu79D{Zfd_lLMma znysD-9$n_yq4*YMo<=wc#^;nkJr4{~jBvET>swODz!#&C95tTQRwVD&nfvb@(6=JO zVqHPZMp!sjk^)2IlB{k$*K? zIb)?l!}e?M5d&{jv4K6{g<~uk@%%yJNnW8a#bdGxSKTH!3z>0Xbl|!gI*&bFZbq}w z*n2&Pn#`H|YKol}T;U0MSuqdZ$J5Bb*$B(_MKpMZ&dzNY|4}1LySuJUQ#QcbFBu|c z3i;2O3dP-nBN_cxjV`{Hd!div(Ge4Btikz2Q|xgp)tVm{ejE;~h5ra!;l`?huS?Hh z4;h@VHxkbw&fogHW+(yNtb5W0w0qWp^ejYDAU+r zG_ZhX?$qIP>7VV`{^7 zf09%E24IFINP@AC^o;T#w-Hd>>9wG+rfZ3JMR_zx-HKFjJsenPjHsN_%t=mfTQ^Mo zV%^cBw)`rQ7-z?~^QF-?foUKMlseD3i-rUH?kNWkef2fp9rs+=KH&2)Vam0l8PB=6 z52yfi334jcvG=x9)K4+Lf4$>u&t;6?%OA$RPh@_s)NnaVzAUY~xpzgQ&QTY-oIU0a zo8o`=E`w-iXJJTpCVDl@N)+8qp2_VLlXhrnr02l|$SzJ|CEpWjvK+j%xx;M{VqOKgOLKi1at ziU{&N|JhcO=k01(xqd9Gehly`ntrN3DkAyoP0Hs12p0m=H^co^)xha6%6P_{-9W(> zocBxaC}nZijCVC{Ux_4lUN45*atEo%!5|_7&K@IdU_17Ty|}dyLt_XM558#NNBQkZ zzue%h!n>T=mkS)%EIH0#)A(;L+h-2Dj>s%Ko6SsC1xL+1gc3D3{y~f4DfaK$I)v!) 
zC>xl=?)-?h_wfP#Mu9!yDAzLW6xyOYI((HS>7;G2!x+yeZ(%FtBBM%RD_Q2SGy>hv zjP-jhlOpublGyjE$6z-;CyND)ZWNgEXmU$jU@Hu>2_*C-u5-<}4KJ-iOLP#2GQD(y z<%!;ekKUuxx$Bu^!RZw`1imHD_KQij^i^-7xAhbv*olj3=ZGZz_Z{*EVcAV$?a56J_ zAzrRT5XvX$^tFFN*Go$`4c%^pp=TurZ+Z#QmaIr+m}q4u1Qjx>;M-G7r9tv`UUs5x zQB?HoMl+RVC5`VG(KliSkdE(ZHXqeEmh98UA>HvcykBQPPX#evSV5ycA0(`ly&aeROlh-!?Z}H5)-QjgS*$+X##Ta8tOH1YV!;cb9Xg92#v$!9;1%BsF*KPnw35 z5I)GTj`9?!=*sRS0Jgyj8H_83h|0`4Sk zzHz=TD%-0ga1XRSFrT8OsnVOlruNl(eRXlk!NV~d;}#lgMm3i^tULlSGxx4^A~eh- z$C2tdB)}AXJHa?aA@sLbFgaHv4h9 zcV6^sjqUD%mC{F)0jCsvt$f;hiDL!R$EE}{-{X6sp(>Gt6;bJkhKZF2RJK8`8%E=2 zw4`j#N1x#cB8R9%Sb^uKx-Wtk#)idwrdF}W!Kokmvc$LxWh|)Q@0%e;O^FJ>_6Be6 zAcKhnp1?$u;b(2TTtA3^X#bgm{}l0*SAYXk){SFxi+N>A!XLE-36!7vncs=Xa?KdO z?H&=`y6GSxK;UwzCiDx=FBjf1YycGUZRZN|9>~u|oSuK&noW5>l7OIeH)as^6m9_d z%#i5gzLg$&Fm$YwX?gd|jcZ&kl6kL#=#g@a>a z!ahkNqJVlARvjZpUBsIkf~PNaMD~oY#wE)HNdZcL)k!NP)HT2E3DIXX`5tLu7UIVu z^aQ7Szo4U|bI&*4TsAx_LxERz`zwWb{1PG{m-Yu0QCgb=j`EW?&GYrATrK^Xwxx|g zvU>L~w&w}~TI!Q@akhd&_?3SYd3)RX*EHOCHo9w%Q}A3=Xov|9-0YUEEv4m19sv^0 z^$mArxD8tTzowzu76jmkBNnI2X{R!zDxHJ86YVMI^F7WCHNzq7lC-~iCB|EgA*HuG zG08QDD%>w8G}w4R5is8Gjfa^7`1_?-D8~jMRFLF#J3wG;(AE9n4I&ncK*aO)l1sAA z&s5dzZDiKurOchRKjY_!AK?Koj5EiwsWFm7b8(U;xO$T$}Oe2Bj1#A@}1Cz?W}H+ii*#Ud%P zNl@?pWsooYS{rWa{2XWiq6QmT`cbX;nOXQ z#7nl29_Hc73G;4TDKfOydOhIB=om{Y{mD^wAEOA~q{sWe`+4|&zd}Ptzf5y@W>1CS z(V}l@m#T;NSHY!h<7gw%^)E#hT$>7qQ6t^IsI+*UIY(eyvGqmNxpubkb&YEb`6y8q|m=5 ze{M_)B)jp^h?|0H(D)=Orsqx^2G1_!Q_j$2a^>pAv8RKdI|n-K6h?HjW1mMtO%`e_ zLT{n;La{Hl_4%rdYU-1?9ONE0+0GH*2I2W!q9HnJb1v3p0J>lc?bua%!1c=kZ<<=I zX?EvI-iUAz?@**)?LsfKm6()O!#v9ThTh#(E`6DLQ}Og6PXkR;Nh-7UC37~;H@^Fe ziA-3)q3&*)Ke0^i7KR=uQ2LWtY)XqflKc@ld?{Sk5R_A*SoRKiEzbFUXk7BKe4udo zN}Jg3`pupP9S*BgYFVOxb}<(_N;YfL8W%ADDnwu8ht`LyxaiEj3PhO1I>zP5$x1A# zhG22bYy?xw-6zl>W0H1B?d}Uy*Rm=h@6|?Pzh;Xl2}`&4O^TN=tcw19lJ`-1G>Bv8Qy) zws}0L{h3Q@8oi6JRfjJy@0qsi#}j)0IV6qnFOmv20%UbFIM)3jt@T_@;1QIFWNuX; zbAju?KEj1F_A^`bdms||0g(BbGET^A^N4vyMiEP0!0_pja{x-(h?NVCI7%HSuD5hU 
z%*s>{anxGI*qM14OpTOn7FP-82n{hi8dN-+r-(Y*)o6k}@sL4w3S;?TM!8zZ)eynj zT9n`G;UX;~p3ri2%5%kIL@$21_d^}LC!?_m_J3ZS`k&3CzjxpNY}NX0zu!atYXa(D L{&$QI|9bu}n12XU literal 0 HcmV?d00001 From b0f988fe3d5f197b2a9ab91968cc4a7779f70450 Mon Sep 17 00:00:00 2001 From: yito88 Date: Tue, 17 Oct 2023 23:45:13 +0200 Subject: [PATCH 138/161] evil: fix queries for new sdk --- apps/src/lib/client/rpc.rs | 134 ++++++++++++++++++------------------- sdk/src/rpc.rs | 18 +++-- sdk/src/tx.rs | 13 ++-- 3 files changed, 78 insertions(+), 87 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index c0115b1e80..72ebe41901 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -205,7 +205,7 @@ pub async fn query_transfers<'a>( if account != masp() { display!(context.io(), " {}:", account); let token_alias = - lookup_token_alias(client, wallet, asset, &account).await; + lookup_token_alias(context, asset, &account).await; let sign = match change.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", @@ -228,13 +228,8 @@ pub async fn query_transfers<'a>( if fvk_map.contains_key(&account) { display!(context.io(), " {}:", fvk_map[&account]); for (token_addr, val) in masp_change { - let token_alias = lookup_token_alias( - client, - wallet, - &token_addr, - &masp(), - ) - .await; + let token_alias = + lookup_token_alias(context, &token_addr, &masp()).await; let sign = match val.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", @@ -325,13 +320,8 @@ pub async fn query_transparent_balance<'a>( match (args.token, args.owner) { (Some(base_token), Some(owner)) => { let owner = owner.address().unwrap(); - let tokens = query_tokens::<_, IO>( - client, - wallet, - Some(&base_token), - Some(&owner), - ) - .await; + let tokens = + query_tokens(context, Some(&base_token), Some(&owner)).await; for (token_alias, token) in tokens { let balance_key = token::balance_key(&token, &owner); match query_storage_value::<_, token::Amount>( @@ 
-341,8 +331,14 @@ pub async fn query_transparent_balance<'a>( .await { Ok(balance) => { - let balance = context.format_amount(&token, balance).await; - display_line!(context.io(), "{}: {}", token_alias, balance); + let balance = + context.format_amount(&token, balance).await; + display_line!( + context.io(), + "{}: {}", + token_alias, + balance + ); } Err(e) => { display_line!(context.io(), "Querying error: {e}"); @@ -358,8 +354,7 @@ pub async fn query_transparent_balance<'a>( } (None, Some(owner)) => { let owner = owner.address().unwrap(); - let tokens = - query_tokens::<_, IO>(client, wallet, None, Some(&owner)).await; + let tokens = query_tokens(context, None, Some(&owner)).await; for (token_alias, token) in tokens { let balance = get_token_balance(context.client(), &token, &owner).await; @@ -370,15 +365,12 @@ pub async fn query_transparent_balance<'a>( } } (Some(base_token), None) => { - let tokens = - query_tokens::<_, IO>(client, wallet, Some(&base_token), None) - .await; + let tokens = query_tokens(context, Some(&base_token), None).await; for (_, token) in tokens { let prefix = token::balance_prefix(&token); - let balances = query_storage_prefix::( - context, &prefix, - ) - .await; + let balances = + query_storage_prefix::(context, &prefix) + .await; if let Some(balances) = balances { print_balances(context, balances, Some(&token), None).await; } @@ -481,13 +473,8 @@ pub async fn query_pinned_balance<'a>( ) } (Ok((balance, epoch)), Some(base_token)) => { - let tokens = query_tokens::<_, IO>( - client, - wallet, - Some(base_token), - None, - ) - .await; + let tokens = + query_tokens(context, Some(base_token), None).await; for (token_alias, token) in &tokens { let total_balance = balance .get(&(epoch, token.clone())) @@ -540,9 +527,13 @@ pub async fn query_pinned_balance<'a>( .format_amount(token_addr, (*value).into()) .await; let token_alias = - lookup_token_alias(client, wallet, token_addr, &masp()) - .await; - display_line!(context.io(), " {}: {}", 
token_alias, formatted,); + lookup_token_alias(context, token_addr, &masp()).await; + display_line!( + context.io(), + " {}: {}", + token_alias, + formatted, + ); } if !found_any { display_line!( @@ -585,7 +576,7 @@ async fn print_balances<'a>( ), None => continue, }; - let token_alias = lookup_token_alias(client, wallet, &t, &o).await; + let token_alias = lookup_token_alias(context, &t, &o).await; // Get the token and the balance let (t, s) = match (token, target) { // the given token and the given target are the same as the @@ -608,7 +599,8 @@ async fn print_balances<'a>( // the token has been already printed } _ => { - display_line!(context.io(), &mut w; "Token {}", token_alias).unwrap(); + display_line!(context.io(), &mut w; "Token {}", token_alias) + .unwrap(); print_token = Some(t); } } @@ -637,30 +629,31 @@ async fn print_balances<'a>( } } -async fn lookup_token_alias( - client: &C, - wallet: &Wallet, +async fn lookup_token_alias<'a>( + context: &impl Namada<'a>, token: &Address, owner: &Address, ) -> String { if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { let ibc_denom_key = ibc_denom_key(owner.to_string(), trace_hash); - match query_storage_value::(client, &ibc_denom_key).await { - Ok(ibc_denom) => get_ibc_denom_alias(wallet, ibc_denom), + match query_storage_value::<_, String>(context.client(), &ibc_denom_key) + .await + { + Ok(ibc_denom) => get_ibc_denom_alias(context, ibc_denom).await, Err(_) => token.to_string(), } } else { - wallet.lookup_alias(token) + context.wallet().await.lookup_alias(token) } } /// Returns pairs of token alias and token address -async fn query_tokens( - client: &C, - wallet: &Wallet, +async fn query_tokens<'a>( + context: &impl Namada<'a>, base_token: Option<&Address>, owner: Option<&Address>, ) -> BTreeMap { + let wallet = context.wallet().await; // Base tokens let mut tokens = match base_token { Some(base_token) => { @@ -688,13 +681,12 @@ async fn query_tokens( }; for prefix in prefixes { - let 
ibc_denoms = - query_storage_prefix::(client, &prefix).await; + let ibc_denoms = query_storage_prefix::(context, &prefix).await; if let Some(ibc_denoms) = ibc_denoms { for (key, ibc_denom) in ibc_denoms { if let Some((_, hash)) = is_ibc_denom_key(&key) { let ibc_denom_alias = - get_ibc_denom_alias(wallet, ibc_denom); + get_ibc_denom_alias(context, ibc_denom).await; let ibc_token = Address::Internal(InternalAddress::IbcToken(hash)); tokens.insert(ibc_denom_alias, ibc_token); @@ -705,10 +697,11 @@ async fn query_tokens( tokens } -fn get_ibc_denom_alias( - wallet: &Wallet, +async fn get_ibc_denom_alias<'a>( + context: &impl Namada<'a>, ibc_denom: impl AsRef, ) -> String { + let wallet = context.wallet().await; is_ibc_denom(&ibc_denom) .map(|(trace_path, base_token)| { let base_token_alias = match Address::decode(&base_token) { @@ -815,13 +808,8 @@ pub async fn query_shielded_balance<'a>( match (args.token, owner.is_some()) { // Here the user wants to know the balance for a specific token (Some(base_token), true) => { - let tokens = query_tokens::<_, IO>( - client, - wallet, - Some(&base_token), - Some(&masp()), - ) - .await; + let tokens = + query_tokens(context, Some(&base_token), Some(&masp())).await; for (token_alias, token) in tokens { // Query the multi-asset balance at the given spending key let viewing_key = @@ -830,7 +818,10 @@ pub async fn query_shielded_balance<'a>( context .shielded_mut() .await - .compute_shielded_balance(context.client(), &viewing_key) + .compute_shielded_balance( + context.client(), + &viewing_key, + ) .await .unwrap() .expect("context should contain viewing key") @@ -936,20 +927,22 @@ pub async fn query_shielded_balance<'a>( } for ((fvk, token), token_balance) in balance_map { // Only assets with the current timestamp count - let alias = - lookup_token_alias(client, wallet, &token, &masp()).await; + let alias = lookup_token_alias(context, &token, &masp()).await; display_line!(context.io(), "Shielded Token {}:", alias); let formatted = 
context.format_amount(&token, token_balance.into()).await; - display_line!(context.io(), " {}, owned by {}", formatted, fvk); + display_line!( + context.io(), + " {}, owned by {}", + formatted, + fvk + ); } } // Here the user wants to know the balance for a specific token across // users (Some(base_token), false) => { - let tokens = - query_tokens::<_, IO>(client, wallet, Some(&base_token), None) - .await; + let tokens = query_tokens(context, Some(&base_token), None).await; for (token_alias, token) in tokens { // Compute the unique asset identifier from the token address let token = token; @@ -994,7 +987,12 @@ pub async fn query_shielded_balance<'a>( } let formatted = context.format_amount(address, (*val).into()).await; - display_line!(context.io(), " {}, owned by {}", formatted, fvk); + display_line!( + context.io(), + " {}, owned by {}", + formatted, + fvk + ); } } if !found_any { @@ -1056,7 +1054,7 @@ pub async fn print_decoded_balance<'a>( display_line!( context.io(), "{} : {}", - lookup_token_alias(client, wallet, token_addr, &masp()).await, + lookup_token_alias(context, token_addr, &masp()).await, context.format_amount(token_addr, (*amount).into()).await, ); } diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index 2d9646f288..88f9c64753 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -11,6 +11,9 @@ use masp_primitives::sapling::Node; use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; +use namada_core::ledger::ibc::storage::{ + ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, +}; use namada_core::ledger::storage::LastBlock; use namada_core::types::account::Account; use namada_core::types::address::{Address, InternalAddress}; @@ -35,9 +38,6 @@ use crate::error::{EncodingError, Error, QueryError, TxError}; use crate::events::Event; use crate::internal_macros::echo_error; use crate::io::Io; -use 
namada_core::ledger::ibc::storage::{ - ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, -}; use crate::proto::Tx; use crate::queries::vp::pos::EnrichedBondsAndUnbondsDetails; use crate::queries::{Client, RPC}; @@ -1095,11 +1095,8 @@ pub async fn format_denominated_amount( } /// Look up the IBC denomination from a IbcToken. -pub async fn query_ibc_denom< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_ibc_denom<'a, N: Namada<'a>>( + context: &N, token: &Address, owner: Option<&Address>, ) -> String { @@ -1111,7 +1108,8 @@ pub async fn query_ibc_denom< if let Some(owner) = owner { let ibc_denom_key = ibc_denom_key(owner.to_string(), hash); if let Ok(ibc_denom) = - query_storage_value::(client, &ibc_denom_key).await + query_storage_value::<_, String>(context.client(), &ibc_denom_key) + .await { return ibc_denom; } @@ -1120,7 +1118,7 @@ pub async fn query_ibc_denom< // No owner is specified or the owner doesn't have the token let ibc_denom_prefix = ibc_denom_key_prefix(None); if let Ok(Some(ibc_denoms)) = - query_storage_prefix::(client, &ibc_denom_prefix).await + query_storage_prefix::<_, String>(context, &ibc_denom_prefix).await { for (key, ibc_denom) in ibc_denoms { if let Some((_, token_hash)) = is_ibc_denom_key(&key) { diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index 4792fbaad0..cde84686e9 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -29,7 +29,6 @@ use namada_core::ledger::governance::cli::onchain::{ }; use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; -use namada_core::ledger::ibc::storage::ibc_denom_key; use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{masp, Address}; use namada_core::types::dec::Dec; @@ -1587,7 +1586,7 @@ pub async fn build_ibc_transfer<'a>( .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; let ibc_denom = - rpc::query_ibc_denom::<_, IO>(client, 
&args.token, Some(&source)).await; + rpc::query_ibc_denom(context, &args.token, Some(&source)).await; let token = PrefixedCoin { denom: ibc_denom.parse().expect("Invalid IBC denom"), // Set the IBC amount as an integer @@ -1801,13 +1800,9 @@ pub async fn build_transfer<'a, N: Namada<'a>>( let balance_key = token::balance_key(&args.token, &source); // validate the amount given - let validated_amount = validate_amount( - context, - args.amount, - &args.token, - args.tx.force, - ) - .await?; + let validated_amount = + validate_amount(context, args.amount, &args.token, args.tx.force) + .await?; args.amount = InputAmount::Validated(validated_amount); let post_balance = check_balance_too_low_err( From a5a82dcb8560aefeab84eaae2422207f15274cc5 Mon Sep 17 00:00:00 2001 From: yito88 Date: Wed, 18 Oct 2023 01:42:22 +0200 Subject: [PATCH 139/161] evil: move GenIbcShieldedTransafer --- apps/src/lib/cli/client.rs | 11 ++----- apps/src/lib/client/tx.rs | 20 ++++-------- benches/lib.rs | 1 - core/src/types/ibc.rs | 6 ++-- sdk/src/args.rs | 19 +++++++++++ sdk/src/masp.rs | 7 ++-- sdk/src/tx.rs | 37 +++++++++++----------- shared/src/ledger/native_vp/ibc/context.rs | 19 ++--------- shared/src/vm/host_env.rs | 4 +-- tests/src/e2e/ibc_tests.rs | 5 ++- wasm/wasm_source/src/vp_masp.rs | 7 ++-- 11 files changed, 61 insertions(+), 75 deletions(-) diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 71ed8d5e73..ec60584642 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -497,15 +497,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::gen_ibc_shielded_transfer::<_, IO>( - &client, &mut ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::gen_ibc_shielded_transfer(&namada, args).await?; } } } diff --git a/apps/src/lib/client/tx.rs 
b/apps/src/lib/client/tx.rs index b5432a4cb4..d939d5691e 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1,4 +1,5 @@ use std::fs::File; +use std::io::Write; use namada::core::ledger::governance::cli::offline::{ OfflineProposal, OfflineSignedProposal, OfflineVote, @@ -1093,21 +1094,12 @@ pub async fn submit_tx<'a>( tx::submit_tx(namada, to_broadcast).await } -pub async fn gen_ibc_shielded_transfer( - client: &C, - ctx: &mut Context, +pub async fn gen_ibc_shielded_transfer<'a>( + context: &impl Namada<'a>, args: args::GenIbcShieldedTransafer, -) -> Result<(), error::Error> -where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, -{ - if let Some(shielded_transfer) = tx::gen_ibc_shielded_transfer::<_, _, IO>( - client, - &mut ctx.shielded, - args.clone(), - ) - .await? +) -> Result<(), error::Error> { + if let Some(shielded_transfer) = + tx::gen_ibc_shielded_transfer(context, args.clone()).await? { let tx_id = shielded_transfer.masp_tx.txid().to_string(); let filename = format!("ibc_shielded_transfer_{}.memo", tx_id); diff --git a/benches/lib.rs b/benches/lib.rs index c3ac819a5e..b83a663559 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -95,7 +95,6 @@ use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; use namada_apps::node::ledger::shell::Shell; use namada_apps::wallet::{defaults, CliWalletUtils}; use namada_apps::{config, wasm_loader}; -use namada_sdk::args::InputAmount; use namada_sdk::masp::{ self, ShieldedContext, ShieldedTransfer, ShieldedUtils, }; diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index daf736df46..a58d042eeb 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -64,7 +64,8 @@ mod ibc_rs_conversion { use std::collections::HashMap; use std::str::FromStr; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use thiserror::Error; @@ -124,8 +125,7 @@ mod 
ibc_rs_conversion { impl From for Memo { fn from(shielded: IbcShieldedTransfer) -> Self { - let bytes = - shielded.try_to_vec().expect("Encoding shouldn't failed"); + let bytes = shielded.serialize_to_vec(); HEXUPPER.encode(&bytes).into() } } diff --git a/sdk/src/args.rs b/sdk/src/args.rs index 36e32df04a..1ef3aff0be 100644 --- a/sdk/src/args.rs +++ b/sdk/src/args.rs @@ -1995,3 +1995,22 @@ pub struct ValidatorSetUpdateRelay { /// Ethereum transfers aren't canceled midway through. pub safe_mode: bool, } + +/// IBC shielded transfer generation arguments +#[derive(Clone, Debug)] +pub struct GenIbcShieldedTransafer { + /// The query parameters. + pub query: Query, + /// The output directory path to where serialize the data + pub output_folder: Option, + /// The target address + pub target: C::TransferTarget, + /// The token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, + /// Port ID via which the token is received + pub port_id: PortId, + /// Channel ID via which the token is received + pub channel_id: ChannelId, +} diff --git a/sdk/src/masp.rs b/sdk/src/masp.rs index 0189891377..b2255c7331 100644 --- a/sdk/src/masp.rs +++ b/sdk/src/masp.rs @@ -52,7 +52,8 @@ use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::SaplingVerificationContext; use namada_core::types::address::{masp, Address}; use namada_core::types::masp::{ - BalanceOwner, ExtendedViewingKey, PaymentAddress, + BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, + TransferTarget, }; use namada_core::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use namada_core::types::token; @@ -69,7 +70,6 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; -use crate::args::InputAmount; #[cfg(feature = "testing")] use crate::error::EncodingError; use crate::error::{Error, PinnedBalanceError, QueryError}; @@ -80,8 +80,7 @@ use crate::rpc::{query_conversion, query_storage_value}; use 
crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::tx::decode_component; -use crate::{args, display_line, edisplay_line, rpc, Namada}; -use namada_core::types::masp::{TransferSource, TransferTarget}; +use crate::{display_line, edisplay_line, rpc, Namada}; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index 8548218482..b23a8f98d3 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -34,8 +34,9 @@ use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{masp, Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; +use namada_core::types::ibc::IbcShieldedTransfer; use namada_core::types::key::*; -use namada_core::types::masp::TransferTarget; +use namada_core::types::masp::{TransferSource, TransferTarget}; use namada_core::types::storage::Epoch; use namada_core::types::time::DateTimeUtc; use namada_core::types::token::MaspDenom; @@ -52,13 +53,12 @@ use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use crate::args::{self, InputAmount}; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use crate::io::Io; use crate::masp::TransferErr::Build; -use crate::masp::{ShieldedContext, ShieldedTransfer}; +use crate::masp::{make_asset_type, ShieldedContext, ShieldedTransfer}; use crate::proto::{MaspBuilder, Tx}; use crate::queries::Client; -use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::masp::make_asset_type; use crate::rpc::{ self, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, }; @@ -67,8 +67,6 @@ use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; use crate::wallet::WalletIo; use crate::{display_line, 
edisplay_line, Namada}; -use namada_core::types::ibc::IbcShieldedTransfer; -use namada_core::types::masp::TransferSource; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; @@ -2126,7 +2124,7 @@ pub async fn build_custom<'a>( /// Generate IBC shielded transfer pub async fn gen_ibc_shielded_transfer<'a, N: Namada<'a>>( - context: &impl Namada<'a>, + context: &N, args: args::GenIbcShieldedTransafer, ) -> Result> { let key = match args.target.payment_address() { @@ -2177,7 +2175,7 @@ pub async fn gen_ibc_shielded_transfer<'a, N: Namada<'a>>( }; if let Some(shielded_transfer) = shielded_transfer { // TODO: Workaround for decoding the asset_type later - let shielded = context.shielded().await; + let mut shielded = context.shielded_mut().await; let mut asset_types = Vec::new(); for denom in MaspDenom::iter() { let epoch = shielded_transfer.epoch; @@ -2207,16 +2205,19 @@ async fn get_ibc_src_port_channel<'a>( use crate::ibc_proto::protobuf::Protobuf; let channel_key = channel_key(dest_port_id, dest_channel_id); - let bytes = - rpc::query_storage_value_bytes(context.client(), &channel_key, None, false) - .await? - .0 - .ok_or_else(|| { - Error::Other(format!( - "No channel end: port {dest_port_id}, channel \ - {dest_channel_id}" - )) - })?; + let bytes = rpc::query_storage_value_bytes( + context.client(), + &channel_key, + None, + false, + ) + .await? 
+ .0 + .ok_or_else(|| { + Error::Other(format!( + "No channel end: port {dest_port_id}, channel {dest_channel_id}" + )) + })?; let channel = ChannelEnd::decode_vec(&bytes).map_err(|_| { Error::Other(format!( "Decoding channel end failed: port {dest_port_id}, channel \ diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index 2d3cd78307..7926f3d838 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -183,27 +183,14 @@ where shielded.transfer.clone(), shielded.masp_tx.clone(), ); - self.write( - ¤t_tx_key, - record.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &head_tx_key, - (current_tx_idx + 1) - .try_to_vec() - .expect("encoding shouldn't failed"), - )?; + self.write(¤t_tx_key, record.serialize_to_vec())?; + self.write(&head_tx_key, (current_tx_idx + 1).serialize_to_vec())?; // If storage key has been supplied, then pin this transaction to it if let Some(key) = &shielded.transfer.key { let pin_key = Key::from(masp_addr.to_db_key()) .push(&(PIN_KEY_PREFIX.to_owned() + key)) .expect("Cannot obtain a storage key"); - self.write( - &pin_key, - current_tx_idx - .try_to_vec() - .expect("encoding shouldn't fail"), - )?; + self.write(&pin_key, current_tx_idx.serialize_to_vec())?; } Ok(()) } diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index f935f39d02..ce6e616b1c 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1795,9 +1795,7 @@ where let write_log = unsafe { env.ctx.write_log.get() }; let events = vp_host_fns::get_ibc_events(gas_meter, write_log, event_type)?; - let value = events - .try_to_vec() - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let value = events.serialize_to_vec(); let len: i64 = value .len() .try_into() diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index c2d27a8d1c..9e8e8d4eba 100644 --- a/tests/src/e2e/ibc_tests.rs +++ 
b/tests/src/e2e/ibc_tests.rs @@ -60,14 +60,12 @@ use namada::ledger::storage::traits::Sha256Hasher; use namada::tendermint::abci::Event as AbciEvent; use namada::tendermint::block::Height as TmHeight; use namada::types::address::{Address, InternalAddress}; -use namada::types::io::DefaultIo; use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key}; use namada::types::token::Amount; use namada_apps::client::rpc::{ query_pos_parameters, query_storage_value, query_storage_value_bytes, }; -use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::client::utils::id_from_pk; use namada_apps::config::ethereum_bridge; use namada_apps::config::genesis::genesis_config::GenesisConfig; @@ -75,6 +73,7 @@ use namada_apps::facade::tendermint::block::Header as TmHeader; use namada_apps::facade::tendermint::merkle::proof::Proof as TmProof; use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; use namada_apps::facade::tendermint_rpc::{Client, HttpClient, Url}; +use namada_sdk::masp::fs::FsShieldedUtils; use prost::Message; use setup::constants::*; use tendermint_light_client::components::io::{Io, ProdIo as TmLightClientIo}; @@ -209,7 +208,7 @@ fn run_ledger_ibc() -> Result<()> { fn setup_two_single_node_nets() -> Result<(Test, Test)> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // epoch per 100 seconds let update_genesis = |mut genesis: GenesisConfig| { diff --git a/wasm/wasm_source/src/vp_masp.rs b/wasm/wasm_source/src/vp_masp.rs index fb26ed7bbe..b22fceafff 100644 --- a/wasm/wasm_source/src/vp_masp.rs +++ b/wasm/wasm_source/src/vp_masp.rs @@ -192,10 +192,7 @@ fn validate_tx( transparent_tx_pool -= transp_amt; // Satisfies 4. 
- let target_enc = transfer - .target - .try_to_vec() - .expect("target address encoding"); + let target_enc = transfer.target.serialize_to_vec(); let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); @@ -251,5 +248,5 @@ fn validate_tx( _ => {} } // Do the expensive proof verification in the VM at the end. - ctx.verify_masp(shielded_tx.try_to_vec().unwrap()) + ctx.verify_masp(shielded_tx.serialize_to_vec()) } From c8f3a7d28f645bbc236971a2689ec045fb52eb3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 18 Oct 2023 11:12:05 +0200 Subject: [PATCH 140/161] PoS: comment out unused code --- proof_of_stake/src/lib.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 41ea1e70d4..35ea602aba 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -3059,14 +3059,14 @@ where // TODO: apply rewards let slashes = find_validator_slashes(storage, &bond_id.validator)?; // dbg!(&slashes); - let slash_rates = - slashes - .iter() - .fold(BTreeMap::::new(), |mut map, slash| { - let tot_rate = map.entry(slash.epoch).or_default(); - *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); - map - }); + // let slash_rates = + // slashes + // .iter() + // .fold(BTreeMap::::new(), |mut map, slash| { + // let tot_rate = map.entry(slash.epoch).or_default(); + // *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); + // map + // }); // dbg!(&slash_rates); // Accumulate incoming redelegations slashes from source validator, if any. 
From e4b415ac995f774355c4f4a6b0996420a3426c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 18 Oct 2023 11:56:06 +0200 Subject: [PATCH 141/161] evil: moved PoS error type to sub-module --- proof_of_stake/src/error.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/proof_of_stake/src/error.rs b/proof_of_stake/src/error.rs index 96123d6feb..d3eeecb3c8 100644 --- a/proof_of_stake/src/error.rs +++ b/proof_of_stake/src/error.rs @@ -97,6 +97,10 @@ pub enum SlashError { pub enum CommissionRateChangeError { #[error("Unexpected negative commission rate {0} for validator {1}")] NegativeRate(Dec, Address), + #[error( + "Unexpected commission rate {0} larger than 1.0 for validator {1}" + )] + LargerThanOne(Dec, Address), #[error("Rate change of {0} is too large for validator {1}")] RateChangeTooLarge(Dec, Address), #[error( From b2ed599229fc59baa0c6ccd87837d80477075ab3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 18 Oct 2023 11:56:24 +0200 Subject: [PATCH 142/161] evil: sdk IO changes and missing arg deref --- sdk/src/tx.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index a17c917f9d..5a3264b9fa 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -544,15 +544,15 @@ pub async fn build_validator_commission_change<'a>( commission_rate, max_commission_change_per_epoch, }) => { - if rate.is_negative() || rate > Dec::one() { + if rate.is_negative() || *rate > Dec::one() { edisplay_line!( - IO, + context.io(), "New rate is outside of the allowed range of values \ between 0.0 and 1.0." ); if !tx_args.force { return Err(Error::from( - TxError::InvalidCommissionRate(rate), + TxError::InvalidCommissionRate(*rate), )); } } From ce20a157e6d4c4a629a9ae31d0d1c4e58c8ec8f7 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Tue, 17 Oct 2023 13:29:24 +0200 Subject: [PATCH 143/161] Increased the precision of MASP rewards. 
--- Cargo.lock | 6 +- Cargo.toml | 4 +- apps/src/lib/client/rpc.rs | 4 +- apps/src/lib/config/genesis.rs | 4 +- .../lib/node/ledger/shell/finalize_block.rs | 28 +--- apps/src/lib/node/ledger/shell/init_chain.rs | 7 +- core/src/ledger/storage/masp_conversions.rs | 90 ++++++------ core/src/types/address.rs | 17 --- core/src/types/token.rs | 70 +++++---- genesis/e2e-tests-single-node.toml | 14 +- shared/src/ledger/queries/shell.rs | 4 +- shared/src/sdk/masp.rs | 6 +- shared/src/sdk/rpc.rs | 2 +- shared/src/sdk/tx.rs | 4 +- ...9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin | Bin 7448 -> 7448 bytes ...FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin | Bin 0 -> 9208 bytes ...D76149D3088F539CF8372D404609B89B095EF7.bin | Bin 7448 -> 7448 bytes ...EB66F886B3A3B6C71D33F456B859D01DA47ADD.bin | Bin 0 -> 9208 bytes ...7C98D1E5AAAA9988F26B1A47090ACCE693572F.bin | Bin 7448 -> 7448 bytes ...59181FA326C06FCA9A49B5A5C394C75942820E.bin | Bin 15573 -> 0 bytes ...FC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin | Bin 0 -> 25031 bytes ...8B6780B6F18A312AE3909BEA19D16FCFE837DC.bin | Bin 0 -> 18792 bytes ...A700BB49387329F8FD049D5F66C95B11B55ADE.bin | Bin 22648 -> 0 bytes ...4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin | Bin 10312 -> 0 bytes ...E72A01F0B169F946835583DC2C71B550315603.bin | Bin 0 -> 19947 bytes ...CFE8EEC08E2D8512695A667D294AE1A4A8D4E6.bin | Bin 0 -> 9649 bytes ...8DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin | Bin 0 -> 7448 bytes ...FA3F379DB351AB4AE081207ABFDFC429C9FA48.bin | Bin 0 -> 17018 bytes ...344FFFAA6CA273027CD480AEA68DDED57D88CA.bin | Bin 7448 -> 7448 bytes ...B827EEEDA858AB983D16024AAA415579A68953.bin | Bin 0 -> 9649 bytes ...C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin | Bin 13799 -> 0 bytes tests/src/integration/masp.rs | 137 ++++-------------- wasm/Cargo.lock | 6 +- wasm/wasm_source/Cargo.toml | 2 +- wasm_for_tests/wasm_source/Cargo.lock | 6 +- 35 files changed, 149 insertions(+), 262 deletions(-) create mode 100644 
test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin create mode 100644 test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin delete mode 100644 test_fixtures/masp_proofs/917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin create mode 100644 test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin create mode 100644 test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin delete mode 100644 test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin delete mode 100644 test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin create mode 100644 test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin create mode 100644 test_fixtures/masp_proofs/C7ECE8C02C2E764EFD5B6A0756CFE8EEC08E2D8512695A667D294AE1A4A8D4E6.bin create mode 100644 test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin create mode 100644 test_fixtures/masp_proofs/EEB91EB873807EC77BBCA95D4CFA3F379DB351AB4AE081207ABFDFC429C9FA48.bin create mode 100644 test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin delete mode 100644 test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin diff --git a/Cargo.lock b/Cargo.lock index 84cbd6f48e..1c0d1cb6d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3748,7 +3748,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "borsh 0.9.4", "chacha20 0.9.1", @@ -3761,7 
+3761,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "aes 0.7.5", "bip0039", @@ -3792,7 +3792,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "bellman", "blake2b_simd", diff --git a/Cargo.toml b/Cargo.toml index 9c731f0fdf..db71e92989 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,8 +90,8 @@ libc = "0.2.97" libloading = "0.7.2" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e" } -masp_proofs = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", default-features = false, features = ["local-prover"] } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" } +masp_proofs = { git = "https://github.com/anoma/masp", rev = "1345b463e8fa3b3a6fa13e4a43fb1c410690ad62", default-features = false, features = ["local-prover"] } num256 = "0.3.5" num_cpus = "1.13.0" num-derive = "0.3.3" diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..1645009011 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -2126,7 +2126,7 @@ pub async fn query_conversions< // 
Track whether any non-sentinel conversions are found let mut conversions_found = false; for ((addr, _), epoch, conv, _) in conv_state.assets.values() { - let amt: masp_primitives::transaction::components::I32Sum = + let amt: masp_primitives::transaction::components::I128Sum = conv.clone().into(); // If the user has specified any targets, then meet them // If we have a sentinel conversion, then skip printing @@ -2181,7 +2181,7 @@ pub async fn query_conversion( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, )> { namada::sdk::rpc::query_conversion(client, asset_type).await diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index eac5f5bdca..3331a30c03 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -1012,8 +1012,8 @@ pub fn genesis(num_validators: u64) -> Genesis { implicit_vp_code_path: vp_implicit_path.into(), implicit_vp_sha256: Default::default(), max_signatures_per_transaction: 15, - epochs_per_year: 525_600, /* seconds in yr (60*60*24*365) div seconds - * per epoch (60 = min_duration) */ + epochs_per_year: 365, /* seconds in yr (60*60*24*365) div seconds + * per epoch (60 = min_duration) */ pos_gain_p: Dec::new(1, 1).expect("This can't fail"), pos_gain_d: Dec::new(1, 1).expect("This can't fail"), staked_ratio: Dec::zero(), diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 346a3f916b..525ed86ec4 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -618,10 +618,8 @@ where /// with respect to the previous epoch. fn apply_inflation(&mut self, current_epoch: Epoch) -> Result<()> { let last_epoch = current_epoch.prev(); - // Get input values needed for the PD controller for PoS and MASP. + // Get input values needed for the PD controller for PoS. 
// Run the PD controllers to calculate new rates. - // - // MASP is included below just for some completeness. let params = read_pos_params(&self.wl_storage)?; @@ -653,15 +651,6 @@ where let pos_locked_ratio_target = params.target_staked_ratio; let pos_max_inflation_rate = params.max_inflation_rate; - // TODO: properly fetch these values (arbitrary for now) - let masp_locked_supply: Amount = Amount::default(); - let masp_locked_ratio_target = Dec::new(5, 1).expect("Cannot fail"); - let masp_locked_ratio_last = Dec::new(5, 1).expect("Cannot fail"); - let masp_max_inflation_rate = Dec::new(2, 1).expect("Cannot fail"); - let masp_last_inflation_rate = Dec::new(12, 2).expect("Cannot fail"); - let masp_p_gain = Dec::new(1, 1).expect("Cannot fail"); - let masp_d_gain = Dec::new(1, 1).expect("Cannot fail"); - // Run rewards PD controller let pos_controller = inflation::RewardsController { locked_tokens: pos_locked_supply, @@ -675,27 +664,12 @@ where d_gain_nom: pos_d_gain_nom, epochs_per_year, }; - let _masp_controller = inflation::RewardsController { - locked_tokens: masp_locked_supply, - total_tokens, - total_native_tokens: total_tokens, - locked_ratio_target: masp_locked_ratio_target, - locked_ratio_last: masp_locked_ratio_last, - max_reward_rate: masp_max_inflation_rate, - last_inflation_amount: token::Amount::from( - masp_last_inflation_rate, - ), - p_gain_nom: masp_p_gain, - d_gain_nom: masp_d_gain, - epochs_per_year, - }; // Run the rewards controllers let inflation::ValsToUpdate { locked_ratio, inflation, } = pos_controller.run(); - // let new_masp_vals = _masp_controller.run(); // Get the number of blocks in the last epoch let first_block_of_last_epoch = self diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index 46d4222983..abe231e9af 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -338,11 +338,14 @@ where // Init token parameters and last 
inflation and caching rates parameters.init_storage(&address, &mut self.wl_storage); self.wl_storage - .write(&token::masp_last_inflation(&address), last_inflation) + .write( + &token::masp_last_inflation_key(&address), + last_inflation, + ) .unwrap(); self.wl_storage .write( - &token::masp_last_locked_ratio(&address), + &token::masp_last_locked_ratio_key(&address), last_locked_ratio, ) .unwrap(); diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 5b195dd29d..d9b13526fe 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -10,6 +10,7 @@ use masp_primitives::sapling::Node; use crate::ledger::inflation::{RewardsController, ValsToUpdate}; use crate::ledger::parameters; +use crate::ledger::storage_api::token::read_denom; use crate::ledger::storage_api::{StorageRead, StorageWrite}; use crate::types::address::Address; use crate::types::dec::Dec; @@ -17,18 +18,11 @@ use crate::types::storage::Epoch; use crate::types::token::MaspDenom; use crate::types::{address, token}; -/// Inflation is implicitly denominated by this value. The lower this figure, -/// the less precise inflation computations are. The higher this figure, the -/// larger the fixed-width types that are required to carry out inflation -/// computations. This value should be fixed constant for each asset type - here -/// we have simplified it and made it constant across asset types. 
-const PRECISION: u64 = 100; - /// A representation of the conversion state #[derive(Debug, Default, BorshSerialize, BorshDeserialize)] pub struct ConversionState { /// The last amount of the native token distributed - pub normed_inflation: Option, + pub normed_inflation: Option, /// The tree currently containing all the conversions pub tree: FrozenCommitmentTree, /// Map assets to their latest conversion and position in Merkle tree @@ -44,11 +38,20 @@ pub struct ConversionState { pub fn calculate_masp_rewards( wl_storage: &mut super::WlStorage, addr: &Address, -) -> crate::ledger::storage_api::Result<(u32, u32)> +) -> crate::ledger::storage_api::Result<(u128, u128)> where D: 'static + super::DB + for<'iter> super::DBIter<'iter>, H: 'static + super::StorageHasher, { + let denomination = read_denom(wl_storage, addr).unwrap().unwrap(); + // Inflation is implicitly denominated by this value. The lower this + // figure, the less precise inflation computations are. This is especially + // problematic when inflation is coming from a token with much higher + // denomination than the native token. The higher this figure, the higher + // the threshold of holdings required in order to receive non-zero rewards. + // This value should be fixed constant for each asset type. + let precision = 10u128.pow(std::cmp::max(u32::from(denomination.0), 3) - 3); + let masp_addr = address::masp(); // Query the storage for information @@ -69,38 +72,33 @@ where let epochs_per_year: u64 = wl_storage .read(¶meters::storage::get_epochs_per_year_key())? - .expect(""); + .expect("epochs per year should properly decode"); //// Values from the last epoch let last_inflation: token::Amount = wl_storage - .read(&token::masp_last_inflation(addr)) - .expect("failure to read last inflation") - .expect(""); + .read(&token::masp_last_inflation_key(addr))? 
+ .expect("failure to read last inflation"); let last_locked_ratio: Dec = wl_storage - .read(&token::masp_last_locked_ratio(addr)) - .expect("failure to read last inflation") - .expect(""); + .read(&token::masp_last_locked_ratio_key(addr))? + .expect("failure to read last inflation"); //// Parameters for each token let max_reward_rate: Dec = wl_storage - .read(&token::masp_max_reward_rate(addr)) - .expect("max reward should properly decode") - .expect(""); + .read(&token::masp_max_reward_rate_key(addr))? + .expect("max reward should properly decode"); let kp_gain_nom: Dec = wl_storage - .read(&token::masp_kp_gain(addr)) - .expect("kp_gain_nom reward should properly decode") - .expect(""); + .read(&token::masp_kp_gain_key(addr))? + .expect("kp_gain_nom reward should properly decode"); let kd_gain_nom: Dec = wl_storage - .read(&token::masp_kd_gain(addr)) - .expect("kd_gain_nom reward should properly decode") - .expect(""); + .read(&token::masp_kd_gain_key(addr))? + .expect("kd_gain_nom reward should properly decode"); let locked_target_ratio: Dec = wl_storage - .read(&token::masp_locked_ratio_target(addr))? - .expect(""); + .read(&token::masp_locked_ratio_target_key(addr))? + .expect("locked ratio target should properly decode"); // Creating the PD controller for handing out tokens let controller = RewardsController { @@ -126,10 +124,11 @@ where // Since we must put the notes in a compatible format with the // note format, we must make the inflation amount discrete. 
let noterized_inflation = if total_token_in_masp.is_zero() { - 0u32 + 0u128 } else { crate::types::uint::Uint::try_into( - (inflation.raw_amount() * PRECISION) + (inflation.raw_amount() + * crate::types::uint::Uint::from(precision)) / total_token_in_masp.raw_amount(), ) .unwrap() @@ -163,24 +162,21 @@ where // otherwise we will have an inaccurate view of inflation wl_storage .write( - &token::masp_last_inflation(addr), - (total_token_in_masp / PRECISION) * u64::from(noterized_inflation), + &token::masp_last_inflation_key(addr), + token::Amount::from_uint( + (total_token_in_masp.raw_amount() / precision) + * crate::types::uint::Uint::from(noterized_inflation), + 0, + ) + .unwrap(), ) .expect("unable to encode new inflation rate (Decimal)"); wl_storage - .write(&token::masp_last_locked_ratio(addr), locked_ratio) + .write(&token::masp_last_locked_ratio_key(addr), locked_ratio) .expect("unable to encode new locked ratio (Decimal)"); - // to make it conform with the expected output, we need to - // move it to a ratio of x/100 to match the masp_rewards - // function This may be unneeded, as we could describe it as a - // ratio of x/1 - - Ok(( - noterized_inflation, - PRECISION.try_into().expect("inflation precision too large"), - )) + Ok((noterized_inflation, precision)) } // This is only enabled when "wasm-runtime" is on, because we're using rayon @@ -196,7 +192,7 @@ where use std::cmp::Ordering; use masp_primitives::ff::PrimeField; - use masp_primitives::transaction::components::I32Sum as MaspAmount; + use masp_primitives::transaction::components::I128Sum as MaspAmount; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, ParallelIterator, }; @@ -288,12 +284,12 @@ where (addr.clone(), denom), (MaspAmount::from_pair( old_asset, - -(*normed_inflation as i32), + -(*normed_inflation as i128), ) .unwrap() + MaspAmount::from_pair( new_asset, - new_normed_inflation as i32, + new_normed_inflation as i128, ) .unwrap()) .into(), @@ -320,13 +316,13 @@ where // 
intermediate tokens cancel/ telescope out current_convs.insert( (addr.clone(), denom), - (MaspAmount::from_pair(old_asset, -(reward.1 as i32)) + (MaspAmount::from_pair(old_asset, -(reward.1 as i128)) .unwrap() - + MaspAmount::from_pair(new_asset, reward.1 as i32) + + MaspAmount::from_pair(new_asset, reward.1 as i128) .unwrap() + MaspAmount::from_pair( reward_assets[denom as usize], - real_reward as i32, + real_reward as i128, ) .unwrap()) .into(), diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 416b3f059e..b0b709d69f 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -681,23 +681,6 @@ pub fn tokens() -> HashMap { .collect() } -/// Temporary helper for testing, a hash map of tokens addresses with their -/// MASP XAN incentive schedules. If the reward is (a, b) then a rewarded tokens -/// are dispensed for every b possessed tokens. -pub fn masp_rewards() -> HashMap { - vec![ - (nam(), (0, 100)), - (btc(), (1, 100)), - (eth(), (2, 100)), - (dot(), (3, 100)), - (schnitzel(), (4, 100)), - (apfel(), (5, 100)), - (kartoffel(), (6, 100)), - ] - .into_iter() - .collect() -} - #[cfg(test)] pub mod tests { use proptest::prelude::*; diff --git a/core/src/types/token.rs b/core/src/types/token.rs index c6fefea9c6..ffc21f817d 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -596,6 +596,20 @@ impl Mul for Amount { } } +/// A combination of Euclidean division and fractions: +/// x*(a,b) = (a*(x//b), x%b). +impl Mul<(u128, u128)> for Amount { + type Output = (Amount, Amount); + + fn mul(mut self, rhs: (u128, u128)) -> Self::Output { + let amt = Amount { + raw: (self.raw / rhs.1) * Uint::from(rhs.0), + }; + self.raw %= rhs.1; + (amt, self) + } +} + /// A combination of Euclidean division and fractions: /// x*(a,b) = (a*(x//b), x%b). 
impl Mul<(u64, u64)> for Amount { @@ -816,18 +830,18 @@ pub const CONVERSION_KEY_PREFIX: &str = "conv"; /// Key segment prefix for pinned shielded transactions pub const PIN_KEY_PREFIX: &str = "pin-"; /// Last calculated inflation value handed out -pub const MASP_LAST_INFLATION: &str = "last_inflation"; +pub const MASP_LAST_INFLATION_KEY: &str = "last_inflation"; /// The last locked ratio -pub const MASP_LAST_LOCKED_RATIO: &str = "last_locked_ratio"; +pub const MASP_LAST_LOCKED_RATIO_KEY: &str = "last_locked_ratio"; /// The key for the nominal proportional gain of a shielded pool for a given /// asset -pub const MASP_KP_GAIN_KEY: &str = "proptional_gain"; +pub const MASP_KP_GAIN_KEY: &str = "proportional_gain"; /// The key for the nominal derivative gain of a shielded pool for a given asset pub const MASP_KD_GAIN_KEY: &str = "derivative_gain"; /// The key for the locked ratio target for a given asset pub const MASP_LOCKED_RATIO_TARGET_KEY: &str = "locked_ratio_target"; /// The key for the max reward rate for a given asset -pub const MASP_MAX_REWARD_RATE: &str = "max_reward_rate"; +pub const MASP_MAX_REWARD_RATE_KEY: &str = "max_reward_rate"; /// Gets the key for the given token address, error with the given /// message to expect if the key is not in the address @@ -874,22 +888,22 @@ pub fn minted_balance_key(token_addr: &Address) -> Key { } /// Obtain the nominal proportional key for the given token -pub fn masp_kp_gain(token_addr: &Address) -> Key { +pub fn masp_kp_gain_key(token_addr: &Address) -> Key { key_of_token(token_addr, MASP_KP_GAIN_KEY, "nominal proproitonal gains") } /// Obtain the nominal derivative key for the given token -pub fn masp_kd_gain(token_addr: &Address) -> Key { +pub fn masp_kd_gain_key(token_addr: &Address) -> Key { key_of_token(token_addr, MASP_KD_GAIN_KEY, "nominal proproitonal gains") } /// The max reward rate key for the given token -pub fn masp_max_reward_rate(token_addr: &Address) -> Key { - key_of_token(token_addr, 
MASP_MAX_REWARD_RATE, "max reward rate") +pub fn masp_max_reward_rate_key(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_MAX_REWARD_RATE_KEY, "max reward rate") } /// Obtain the locked target ratio key for the given token -pub fn masp_locked_ratio_target(token_addr: &Address) -> Key { +pub fn masp_locked_ratio_target_key(token_addr: &Address) -> Key { key_of_token( token_addr, MASP_LOCKED_RATIO_TARGET_KEY, @@ -920,7 +934,7 @@ pub struct Parameters { /// Shielded Pool nominal proportional gain for the given token pub kp_gain_nom: Dec, /// Locked ratio for the given token - pub locked_ratio_target_key: Dec, + pub locked_ratio_target: Dec, } impl Parameters { @@ -937,19 +951,19 @@ impl Parameters { max_reward_rate: max_rate, kd_gain_nom, kp_gain_nom, - locked_ratio_target_key: locked_target, + locked_ratio_target: locked_target, } = self; wl_storage - .write(&masp_max_reward_rate(address), max_rate) + .write(&masp_max_reward_rate_key(address), max_rate) .expect("max reward rate for the given asset must be initialized"); wl_storage - .write(&masp_locked_ratio_target(address), locked_target) + .write(&masp_locked_ratio_target_key(address), locked_target) .expect("locked ratio must be initialized"); wl_storage - .write(&masp_kp_gain(address), kp_gain_nom) + .write(&masp_kp_gain_key(address), kp_gain_nom) .expect("The nominal proportional gain must be initialized"); wl_storage - .write(&masp_kd_gain(address), kd_gain_nom) + .write(&masp_kd_gain_key(address), kd_gain_nom) .expect("The nominal derivative gain must be initialized"); } } @@ -960,7 +974,7 @@ impl Default for Parameters { max_reward_rate: Dec::from_str("0.1").unwrap(), kp_gain_nom: Dec::from_str("0.1").unwrap(), kd_gain_nom: Dec::from_str("0.1").unwrap(), - locked_ratio_target_key: Dec::from_str("0.1").unwrap(), + locked_ratio_target: Dec::from_str("0.1").unwrap(), } } } @@ -1032,20 +1046,20 @@ pub fn is_masp_key(key: &Key) -> bool { || key.starts_with(PIN_KEY_PREFIX))) } -/// The last locked 
ratio of a token -pub fn masp_last_locked_ratio(token_address: &Address) -> Key { +/// Obtain the storage key for the last locked ratio of a token +pub fn masp_last_locked_ratio_key(token_address: &Address) -> Key { key_of_token( token_address, - MASP_LAST_LOCKED_RATIO, + MASP_LAST_LOCKED_RATIO_KEY, "cannot obtain storage key for the last locked ratio", ) } -/// The last inflation of a token -pub fn masp_last_inflation(token_address: &Address) -> Key { +/// Obtain the storage key for the last inflation of a token +pub fn masp_last_inflation_key(token_address: &Address) -> Key { key_of_token( token_address, - MASP_LAST_INFLATION, + MASP_LAST_INFLATION_KEY, "cannot obtain storage key for the last inflation rate", ) } @@ -1336,10 +1350,10 @@ pub mod testing { H: 'static + ledger_storage::StorageHasher, { use crate::ledger::parameters::storage::get_epochs_per_year_key; - use crate::types::address::masp_rewards; + use crate::types::address::tokens; - let masp_rewards = masp_rewards(); - let masp_reward_keys: Vec<_> = masp_rewards.keys().collect(); + let tokens = tokens(); + let masp_reward_keys: Vec<_> = tokens.keys().collect(); wl_storage .write(&get_epochs_per_year_key(), epochs_per_year) @@ -1348,7 +1362,7 @@ pub mod testing { max_reward_rate: Dec::from_str("0.1").unwrap(), kd_gain_nom: Dec::from_str("0.1").unwrap(), kp_gain_nom: Dec::from_str("0.1").unwrap(), - locked_ratio_target_key: Dec::zero(), + locked_ratio_target: Dec::zero(), }; for address in masp_reward_keys { @@ -1360,10 +1374,10 @@ pub mod testing { ) .unwrap(); wl_storage - .write(&masp_last_inflation(address), Amount::zero()) + .write(&masp_last_inflation_key(address), Amount::zero()) .expect("inflation ought to be written"); wl_storage - .write(&masp_last_locked_ratio(address), Dec::zero()) + .write(&masp_last_locked_ratio_key(address), Dec::zero()) .expect("last locked set default"); } } diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 0020dd995a..1096be0d28 
100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -40,7 +40,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.BTC] address = "atest1v4ehgw36xdzryve5gsc52veeg5cnsv2yx5eygvp38qcrvd29xy6rys6p8yc5xvp4xfpy2v694wgwcp" @@ -55,7 +55,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.ETH] address = "atest1v4ehgw36xqmr2d3nx3ryvd2xxgmrq33j8qcns33sxezrgv6zxdzrydjrxveygd2yxumrsdpsf9jc2p" @@ -70,7 +70,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.DOT] address = "atest1v4ehgw36gg6nvs2zgfpyxsfjgc65yv6pxy6nwwfsxgungdzrggeyzv35gveyxsjyxymyz335hur2jn" @@ -85,7 +85,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.Schnitzel] address = "atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt" @@ -100,7 +100,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.Apfel] address = "atest1v4ehgw36gfryydj9g3p5zv3kg9znyd358ycnzsfcggc5gvecgc6ygs2rxv6ry3zpg4zrwdfeumqcz9" @@ -115,7 +115,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [token.Kartoffel] address = "atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90" @@ -131,7 +131,7 @@ Ester = "1000000" max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" -locked_ratio_target_key = "0.6667" +locked_ratio_target = "0.6667" [established.Albert] vp = "vp_user" diff --git 
a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index a766846916..520c702608 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -30,7 +30,7 @@ type Conversion = ( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, ); @@ -254,7 +254,7 @@ where addr.clone(), *denom, *epoch, - Into::::into( + Into::::into( conv.clone(), ), ctx.wl_storage.storage.conversion_state.tree.path(*pos), diff --git a/shared/src/sdk/masp.rs b/shared/src/sdk/masp.rs index 739f941b9a..f9459f6cab 100644 --- a/shared/src/sdk/masp.rs +++ b/shared/src/sdk/masp.rs @@ -34,8 +34,8 @@ use masp_primitives::transaction::builder::{self, *}; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; use masp_primitives::transaction::components::{ - ConvertDescription, I128Sum, I32Sum, OutputDescription, SpendDescription, - TxOut, U64Sum, + ConvertDescription, I128Sum, OutputDescription, SpendDescription, TxOut, + U64Sum, }; use masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; @@ -989,7 +989,7 @@ impl ShieldedContext { Address, MaspDenom, _, - I32Sum, + I128Sum, MerklePath, ) = rpc::query_conversion(client, asset_type).await?; self.asset_types diff --git a/shared/src/sdk/rpc.rs b/shared/src/sdk/rpc.rs index 58609bed42..cb881e88a2 100644 --- a/shared/src/sdk/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -228,7 +228,7 @@ pub async fn query_conversion( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, )> { Some(unwrap_client_response::( diff --git a/shared/src/sdk/tx.rs b/shared/src/sdk/tx.rs index 9d7fe0cfe4..0381e6f6aa 100644 --- a/shared/src/sdk/tx.rs +++ b/shared/src/sdk/tx.rs @@ 
-15,7 +15,7 @@ use masp_primitives::transaction::components::sapling::fees::{ use masp_primitives::transaction::components::transparent::fees::{ InputView as TransparentInputView, OutputView as TransparentOutputView, }; -use masp_primitives::transaction::components::I32Sum; +use masp_primitives::transaction::components::I128Sum; use namada_core::ledger::governance::cli::onchain::{ DefaultProposal, OnChainProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, @@ -1643,7 +1643,7 @@ async fn used_asset_types< // Collect all the asset types used in the Sapling converts for output in builder.sapling_converts() { for (asset_type, _) in - I32Sum::from(output.conversion().clone()).components() + I128Sum::from(output.conversion().clone()).components() { add_asset_type(&mut asset_types, shielded, client, *asset_type) .await; diff --git a/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin b/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin index 20634c1de5ed13fa856cedaf2127d8ef6c39a987..741f4d5106a2c0107b1dbbcb346b1da4d04cc5fa 100644 GIT binary patch delta 1002 zcmV9f)2Sk z3xbx1LbHDsB?Et|ZB$N(t^fDH(Z!P~CeI;tdVVf&h2S1wq=85^i?EsNm#(Mm6$^Cf zpC8<1GlLI;)q|le%A6-?Q+2d;stU=K%z5>nI7}RsG&a?-rCM1L+COsVHOB?q*YDh@ zIsBgz=9~rh!NBTxxVa6NwkgW28uW+QHJxwwyYH+rn$>^H!`?!wi!mAd2v=B$@b&SQ zg!K%D(SRWq9pE_EHcF&+@c+)14>h2TW&>{i`eh|6O4bVCl!?Vf$K6E7f=d{p2=-yk6!A@ zl0#=b&JRjJD>X*s>9I_ zP1x~#w{k9|sC`T|Bb`m-#UfCqdIC7_m-HCXMUsagO)?& z_{RUTXEh01|Me_i_Fc<0!Z5O+mo%Gj;Ug9UtsHmMrrZ7)NCL^Zed;swsSt_SSP;mi z>mlbu;x^W&^DW)*RMl6i&1#O@gill1ZfSp00xyKj{9G-UU|I4}AG{7`wq!%W4~qOg ze}^yEajuZ6*eme06*RkTiFTF(tMxVzX5u%WpdGv9nJAs9F9_`k>wy&*t;|(*DrB)S zRTx5sj$A2>$x z${IOmv3m~<(z;w;1&4oJan(%`2ib_cn{lnvFKU!eaE1cwq{t{&5xdEzj8c?fkOhk delta 1002 zcmVpk*o95o+sy!j3?)2rzLiDn_ld*Lk;u- zAo>W*4+W*yW8ST5ntdnZX%9pYUkB&FaY^gtM?x5Sma|t7^a3E^HG97fO%V;Q3GW=8 z7Q-V7YOwL(c2Xh6Jv}Z>+AB4)YZLSWAiZ4^lX^sH^9rQ&gXP(NuV|+^k 
zbjJWR=d*tnB?Eu1Sg7AKIHb>%y>pD=gV38zfmhBe^c}z4=K(Ob=%{M{GsRD$3}sZ< z`13s)&J$p$1=4PqDhg)QZNeU(xii-KDhxNL6T5xkfZ&S2@@+#p5nc1D>+w{YLX>>! z&(-(_=Uz8#)G0G3uXFL2P6s|XkLbR8wo}Gyo%@r%Hd=q(I=pa6vcnOgR&z_bEBNqu zrEU7CxQ2`KTrvb^`>S8#c{R3!F-PAjJ(>y-n$8UaSCF`*SYvzvFG=R}eVh&=w&UC* z2OqS8afQq3o2A^)AHcBKI0{XAQo);6Abp0KC3t+&_G*<%7>tZW5tFZNZV!N%e>k!_DUBk>tMM;|yBYaCd)5W;LY=eH1w9dVaAr9_cIbU*CL4 zTofue!39yve(U!}l`*Mh&KFjp57vqJd9MNH!5^T^@Nz^P*J+{qUg%%B(7?TX9gWDl z-T`_a2xKz$#1sDxiW$g95=pNmb!S~*pq(l-ri>;=)e>AUV-)|RwpH`XqN!aOXsj6EXANhRAs!h94?&m`>x|&9` zg73G;Mu5c<<7(^@(ZS6rK2ivl2=1-#Y$9a<8KL{`4OVh4?lJn{9oE32)hTjhl?czs Ya}+Rcru$9usD;;K>Tn9_0h8n$G(5oe8~^|S diff --git a/test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin b/test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin new file mode 100644 index 0000000000000000000000000000000000000000..b748454de8fd4dcd1fb8fc32f944bc6abc828039 GIT binary patch literal 9208 zcmeI%RZ!f^ngDPH2os#(mM}25y9WvG9yGySgS)#7Zo%E%9fErxNPyrBZU=YvLzBHH0ceZuX8WfZ;-Hc~XYr zL}8b!+1l~aN?5qRkNbOH?kR|b7ByJ}CgFlgV}SJtBqJYz*Sm&s)zm8lUg=XE>NuFl z(Q?Sdy!9wOc@qit=JsD2Msu~yMbS{YipVe%3*NEpUmwk&N%oXAc`4f5g7BxVV30%m zDT%oH6};7iuCgmP67;gt&||Zbq8r&Ye#pVeHVP0!YPkhSOagHxV8-tRr!3>mOB~vU z&gaUzFWhMz5F3F~Lc&`P7aVDOYq5BQdueWos^e9le&yIB-v=kBA>?Abt9NcEv?Ck4 zeDUTl@jc~H#O!}9tLM?hEa%ap^;1xhbW8LA=7iQu%8Gp1$vBEy3~D@ImH-5nCfngkhwo&}~#F28t zMA53f8yQ-Xw**L7;&LW&PIT{!I3nLt0b#<7+-6!m$GI@Fa#EsQYo|4+foz>|J*QMO zZNu?Z+r>@CEmF9j>(>JAFJVx>% z!##&1FF*_~FY7;tH;H|@Bryz@8pu}*q_vfYN2-)d2~suWBG&|kIfBjTtfSvQ>}cQi z6?Fz(Zz8c<{UP->tnlvKS3 zgKXwaJJU@Ty==W(mGD0;Eb5EtQ8t}ptbb(FEEhhE(*rG|0V9|za9t5%$|_li*04}$ zKf|G-8V~(Jd&p{Z6vPJ#W7pI!(|>teyM5<0vpyYk;O0jB(HVmyeB8i71`n1L)WYpH z+$>>j$)g<4ACT=nPfI z^x@_Z67_U9qJku2y-U+%5#`JeRg>YJzO9N|q{#18&I6NC%-a*AZS+BzN1IYJM#Ls0HY;{oV!779JXTs$(l@*H2900bTS%DE%L(UN8V4E zosiP;yOh_g(`&UDfV9=IRrz5{<7iDl>x?l3A`jYdZJ*aQtY8=n6YW?{fgs*|LbKBT zhII;kD`NS3X_&Rh{G08MCOHzQo)Uw!*ytc{w<#eBx_HwfyW|ZFKApF0H&`h-@Uo_+ 
zhV_QDZD;%-q3}SV5g|C02gb9FWA?oJ8^0u(?zh?2Q7PoO&Ry_&pebq9Of%YB?uDhS zv4}$^JmK=*0jIT<1@$^AnA5p+R?uOy)YC$GK)$lTPE}}ZWHZWGiRv<6dNtPL`h*hi zkuvNrs(wBHA1OT348nhF4Y72Q%fs_@k*Fwcf7%{03u0$QajsI5PjUUVr2n(R$dD2ig1 zwyN@TNu^7>MkBLnNfmyRDr{2LNMZr84^8Tal2wky?p!$ZE803WT6vALNR9&M*D#>6 zk9LL0R$T0#Nn>dksm@1Y^t z?CbmxyY0OPxbkWn>GRevGKjaEY#{P=K;h{2*l0C-22p+a5M-aoP{MR?mpNEghjSy3 z9Ts%J=74Sld?%EG|5cAF4q=iWH%o?}fKzOo?p?0~gxL~wMY%zcVV^(*R&honJ}t;? z8In{bcIT_?hZ}{i}q^7|q|t{%7}B49aah|5JeK9oKV9=Aq@8 zs281(Ny?mx-nX4Jh%Fm4p-%j(o4}jtzaPzitMyy0e@nHfvQ&MgJc*4Vx;zFHcY0)F zm4Gj=aN-myq1x)soS)_Y`UX$l2_BVN@qo9CK7^6p;;Ugb)lIDdIpEF@DcX_367hpl zY1m>y{o2{yZdm7cbF94YE1QtE=dJM32(*Mpmbk+Jnb>o9ua}F`fZZ9MGyxHau00W- zH{(w(weIf#tjrp`piGXXsqV(vEo&KBw`9lL3!;c6-<1`u!&fC#CXFNSKZ5!q56R=h zUzAVsRr6_(^X{!?DRVjBrLH<$`Y7`eX25{dwYhUc)S;F3D6oknU~SSShkSBB475!* z2ob(`MFAtoN{TRIdWs%)Jl1cx+-@rE+Yi7}&oWkLeIg?mrC$+^=_gnJh@WI2Q58`Z zIz8|G{PnjCG2pK#`|onYaHZbU^mjeD0+@a>OqgaK+yRMc+5-n9Y`3ilO<`XY$ya=6 zzTX=b_KwZCE*4|?I&_qsYBK76IwmpX1r8ggd(UPvf@#{^dA~gh*Sa{Lf}izBk{9ya zDdQAGqm2YHP@p9?M3n^ts>_WZsBMB#IylO4+x-dE+Ng(&Vnklb)TbQpps;DJ2)v9# zn2nANkGyn=h}Z?tO(qU0y&kH{b3?7i>b!{VsqFWPB^20ZF@dtfh|04zWm6-H%JGzz z>pS#r*%z+4U*eD{MvK@5Pk1l!Wx&b$t8z@|YPUk90F{kb=<_DT86xvL!d)T>jJO11 zYlKhXDV2tQuo!W%yI_uqpL_QiB(+BiT@{BH;6a^KN+fC+-w4Q*@FHgNiUDM1T3~}J zBO9YGUgF_EJ%Vgk{O5rBnZjn|x@hWMC;hMp@cf3A)f&=He5NI9^IOqhCMk^C6R)2!%XdFl^Z;U78rxhB~g^+s38phhSI9`q0_5Q^wtzyIgC35_L@H)tKR z!1@s-$m-6+yX@H7fWe%MoV5V6K_(TRI&U1CcFPMwxskv* zPs<<0;oaT6?*PKHj+8569JaK9V%E56eVY8?;7!fulqD#U{1p?%LFxv6e`nB#v!Ei*+&=~lUP6oCDX^P!h_gw6Z$cqTYhYxi(k&`6f&V!1AS;+-PeMxxI9 zh{c3NWKZ-Tt&>d7u-D6A3T>|abfeadtQaNN#6&q|u%VXFtU*RgR~Y2&z9}o_q;+l$ zm4|}`UlzOxT}%9TCm9^vz3e}W7`w`zn&7q{UMB*Pc0$Nu^GAq-B*-ZkUh4bNaOuL& zWF((JYTVR`#B}NFdnz_j>dYvs-<(-awt(YdGt&{lSgK!Z?yWTObv5VrRU#(U^j!A! 
zwIG)UvQNEo7+=u~hd9>9Hm+W%5P`KkK6ac}?xAeWo@-LtbcKfNjr5!`g=-MSCqW}4 ztwN-HX9kw!r<`@DkW1Tdg1Gxxa=4L`dLowe<@TLX@`C6?dBWdQ&pihW*8j){4?|h6 z@rq4S2yKWIVMRR@a=?x-9Rc zrt(4&ht_*?_Sd;cW{fy9clfC!Yr>jMe3YfADZ(G>Gc%R=Xvyh0+2*6(9)1_{J3~*d zBz&h4L2M&oCXgbTiujZ#&g!ypD3#*{?GR5Lz-p)W&oO5k4`p}PR4@(;A2t>-7Ss3H zml7H3dNB{Ws;al(ZS9`s2$L?e?-)>6m9a+H;;>b6F0@0WDToseOj0xmg-}LuRo;W5dk+hE{%-52s$#^ONU(|FgEWvrLmJ6 zY|HX>*!|CH33i=lgn=qd%+`lWCV`v}cFm!B?Nvq{q9h>31RhkRRcmZpK(>3ysaVcH zR0LRlv~}gv8=jt691<}GKi8QMIBKeJIxC~|AiUR?^u^bOs>Z$yh<-raxi5j$1E)fD zelkK-{7{WOsp*@^snqXPT#uE)>7;1jHfsO;9x|GYy*{NxGa2$lKjl7Vn?A){AOJ^0 zM{>`j2mU)yr&=rmQz2{Q#+GHIi>Xko z62md+Sr}?Yl|f89$O9h?=;pKUJd;M|QiTp;3YOcS1b-Akb?6~jviv~c@pZDqjRN_i z{fZM;W2Ur2%Rh`vh%#vGA@y}pRZUo?2D9Q(*MdTWlVP7ajMU>a-tuVDdh*l}2fS@U zY)~MqiSk?2z-3;t+j-5}f`JpE@KG)_-WYNlS`{4_sDHP~4NM%N(FeRK(}93aIO5Uc zR0<~wkx>Rk)DDS5+^szBPlbdVz41HCgl}Kowl}XpR%)G}4s+$?{4REx86!7>4%UEmdve3F0WuVYaW$Klcb9%Jrksrpr^iWN zW1ZTW87VV8!^@+e=`_P5Wga4!+3?LRi6)~g(^SubfHZ6R$krb_Alf1&R75?-Vn$?(yW9mh1?15~BF6R*)Ce#pJ@MXaxn?SH&a|6{X< z`Kz++UsYd|FnN6jU*8G0J*qz7p!wVKCv*~osNJ~}YWZa_S;ER=01Ktv)OLPxF7v0Zeq^!kvh`5S&^HW%3>|D->eEf>? 
z7vS`!7cGTpqVdCmLw<`;aU(*#BBZLyZc zvFg||2K0L#x3!HU8rnEZms&@83aJDWUrF?WjH5hklnIl72595^{?bQ|TJ7M?E7p=3 zr%ymRyW!V}CHl^obzN2K3z`e$(+%pdf!4jEGM|A7FI4klLM5IZqpAG5_P}VVvJ_xzS$94mKK~@8VTNkOkyqMyFJv2B>0>+nWW=T#U06qsOwL?>3Qdl zh%UwB-*tW1ejw+bqlwV{*ur@(zHI+g_ca)+`_=k(YNA@0?h${}Ths48Z7a_q0&4c|BLp#RRoh zwVA{D2`yN=`>XHZaUo1O1CSm|y?b=n+u^A`p06Gkai_H`X@QgdZ^_Pz*_iU&rVC{f z3Ih`9q-#!>AHKhcUQEDxwY!ieaUVpE${A&g&l&7XBEwdp2F4B4a|x6B(U6Gw0Ly>g z*_l9(g(eg9qr!E-oyATBs{I_vjb-sk@izH+_OKQVN37ELqV1Ii$;?XNBV(jtp7CD8 zDRkdfNUHZ~@SgblF=vtxu<=PG)-ks!*CF{JyK@C_V* z8bjd*J2;Gx;GMcUhbQ=(q&M4??9L$=h}6Qv>Y|c^tVNA$gSpg#R=WQ9K zhu9kqrK^Rl@to$D@XzFW%)2p^nD&J7%vDsOw@TxS72O$W*W)_tTWIQ{{FWyL0;Q9X?92>wf1n@f$`rekD|S{oaQFW&TiA1t$9*yT3H@2ZX4@gqs~ zR%oosQR_kPY}P)n6}oH z#0Y30`JBu3C`{idHOILKHk!;Oo`_vLi;46u@*&lQ*^W<&DQKs6?NYMzx#)~!S#Rjn z(N9xZ1RyxWEMHn-c<6BRuPVDgX+f8z-WgtysA`YR>+>VT;SEWET{*(#JalM8+2?5y k$kjdXM_<3<)yz!efB9dt{=Y79zv{8SzJFPd@{gDQ0Ix9h(*OVf literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin b/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin index 1fbae64602829c00c5f1da4fa116e6a3dc28caa9..18f83d0543674902e112707b9a890c8baf916c7d 100644 GIT binary patch delta 1002 zcmVvYhohcmRZRsWyRVE!v#6`hu zk(qIeQddQ$q}|-O06{Kq!C&2!mkmP*8(k-`36 zA?XXmR0V%_E1S@M7R-2&ymF~G#ClQiPR1qVyGQB>+3-q|7QhoAG7358k$!IX?!n*R zJR9%dF}LkDLJe+ny9RU`Q$S@q#_W~>MIcM_tw99FAb?De;iQo4Eiv=XpE*Pm;nxMP z5vGD!i8Q!rRiQaXyb24kK`=&z%p5ZsB1s68;Cp{_tD}#j77w96T{ssDLXxU_4B~U* zZ9MpSwU}u*`QcSk%B&v0JTaeAwf|@}gThnR)l`*6Q?UjGNWokP z?l6@LKM*Zv!J`e|Si3kNq80I{&Y;Fi$+GBnuv|&J+JmSK5#%nGaG=BPFuZQ-HZCv5 zCx0i|R(ih$S6745pYZmQ`^z7Yz2d@6W(|Mr*1iIyAwaMi3aidjT>rP{QRuJc78I|W zXK1&i5+1?S?CqCL06QWUOH@w_VbdV_FBi!S!>jukac-;KrW{>=z|UJ<+X~xuL&>W~ zn}-|e4kGFLKmr;7b`LBB-5LFoq3fWsVD;2Tn$A zARF%H-*q4qcsBAYxPVlin2eOvOMoo!?qTg37=%L8sD|1#nCT+f;8z@QNgY>fO%TYi YE$!*C*K|(TI4fpp@^KR`0F&e#H2>)7?EnA( delta 1002 zcmVe6>1H+jGSqT`PNLuuCrGV^a3DLVLmyV&#ydNPhCsu 
zM2L~U&v{E|(5ctv@ZIaruv#;-YZLSWARl*%nEx%fG9+jZOQ2|RQBwB1w6)HeI?t;v zi!}V6E3-~B&4&sKlc=-Npnv%n*-s#fNnN@SBwzXwTAbL@YsCZpj>}s zmMD4_iHoL7Mcp^PB2n~iKxrCIR7G&rsxv^O=upSz%T<4y!GEG5tS}vI)@n`s75lT@ zp&JxP75=|#uG63X04BaXv{mi?1;PP8KK+U0lLR>b=BMk2NY?LUSN)8bMY;Owi>|UJ z8}_M>b7b`U5b=7q=sLOYsQbV36V|>mXqe8L5J`Fd{(Fm)WHqA~m`yt$gOcP@-F6~* z@DWtVao2ys$941zZ7A0@5_=@>=%xs$(@N+NmL>KNLuP5#r2RsUXX z%jE3-*wdFoiZ0R`TXOkj%_!<3k#;^Q@tye^^e0mW@rB+UmV&V1I2&j zg4u9pG$u(HmsxRui^s3DqlE>ieCZ(bQ21X819yLtK*zbY2}HsAUE?=XBWALdePvmZ z;0OsMmb3?Ht!E-r33Iya-{6K8^$>owT3iss!|0~Zxyyu6f@Jnw21t^=ftP<6jTJ&jhU{2+t#bE=2NVa@6i3n65c(VD zfI)W2o>X~<1Kter1^D7~_hxv{Zj1nk4VgJPpyk6JZKPUH7E5v(JxOB)V=rYI@NQ?- z5K5>zTt^&&x$^B+HWZC8VOPbN~PV diff --git a/test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin b/test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin new file mode 100644 index 0000000000000000000000000000000000000000..c51b5ed0d978ab920e1c513daaffdee73adc4aed GIT binary patch literal 9208 zcmeI2Wl$aMlE<+?$iad;gS!WZ;2Jmy2Zsa>?h@SHEf75Df#B}$79_a)K|*j6f3OD}ny&ux>*?-6L_k0w_+$MN;vb7LZ~c@LN;j!dE&-Lx zug`SO!I*-`qSL4;yhxfDM` z&FIEhorRWtIW(73)+rA{R0?F|mfVOJzM}tf@IP_#%h4|(uI&|N^0fOksmc1jYXiu2 zYQn8CBme!}->+g)XWFqyN6y2lolj2_FAhD;#7ro98AR|RI4}A0 zIxpnHvnwQ?eJcs+drFlqM9oa9`YamJXGqNoF*o29<8^W^7=XBl5xYH7}CT9wg5Cdg`}22tPL>UXr#o{QE{9e zJK=_1mMLvxfqK?altvlPiUgoXnQ4S5@)E_HKDA-4ldjF>Vmd zfB(ttO@E<5k(G%a2D%|YS8$$wVN|$qLd?$Q5`~nH)Y>=$@@m>9*Fa6aKXHG!IK^vy zX5^KRkvk`17d(DrgJD4BS@!`1-ts+^(X-$^N#OT9Op}be6qgPZvf}`u0Kl5`^@!u4 z9^#Pj14%%^h?{Bc#sJpmnbXzoZ>zbG2P~kR;JWdVuMGDO5YhCgD~U>wQ1$BK0tny( zkc{PwO*nvvdlQk)MEUq$&K`OpYaQ;;fCgS5_5#e-iacAX!y{9p zR@%F#Qrk@MC>idAoQMgonRO=x^5rPNQPU6y9+Zv-M&DPtJ-%)pm!+xqvSU@P`Q6Qk zfC%LFDUae)kgQp_k4LeksK}Qj$NXlHB^nZ{UCIf;Yv2W1i^7PKhbZFuf$@71J(Gm9 zu_H+RSmC;(2Mt!TuRI_re?F5mC$rZhhc2?5PntNRvXx%PcP0sfBquqvbl3v*G;j_$ ztVAY#)A?!ZkE{0UCy@cxZ)Ltm#NTka%BYh83sG&;G7fSlgvwj|?agyj+0y1yS0z3| 
zCg8B4?!)0n0K41cx(|_!vOVWi#-WM>A2otm?7=8cDpXQK)Qtt`bbttFS_@X&cba#* zde?o0m)Lykg@Se(U~f%QqawcEJTpMCXW`^*uPZ+n(PziAZnK+XpY7C$3D=K78~bH( zr6w(4H*emNVYcLB@7tnE{C084P!`J2c!mSl=hZ2bJbVuYR{L=L9XwL!x#D4$m#_V5;f2) z=rPnJZ)+_CNe~Un@f@f)K)h&6Av2d)a>f7XQ>+aLTlHH>Dt;Ym4T@H`M7MPz3CEx}jnppYe?W z2Y9y1yukB9TIZh@V5`{rnN(C>>KIt^aBcG4>beDY5>M*cwg%u4`;0Kj?CFh}7A4LO zD2nY#rN#VD?%1bsP+1Ij8gdIn8h%dUY|JknRw&RCTm|? z0353`p)4+Axi=debQdDD92)(d_ob8;F6rDAX2XyJJD_YfR$Lz_ebT{UG`S><{4x4= z%;JQaRn)Dl8qTKMYy{|O;j4=xmAq#$1K=ESB+_?!$gLmX+BOL6#z_u5=4gQMM^cND z{(3mQp$$3sRtaGv`s39XeKVLmrnmec3qCf0?jidkVh~-rZ9Q<7XP187733;EM8{7y z1)#UDj`d?GH%nviG z;^R!Gx?oe-mI}s|uV3n-Pa)O=0`V&aFSZI=-|qdd-#EQ3+j^A)LYD*0DT-*7{Q}cy zYXo{dPGcAl{-W?-6#k3C|H2do@T@<$z~!e>>Mq!NHQA9&slRmIXn$%jW+fY0-jscd z_8V2d?LRw(jqD5~GS01y%Hhxz^n2T{4wvUmakO+Bw6Vk=mWKzo6y2wa4kl`51f3S? zjtxi(^r7ci)5Z_FYrZ0k^%( zewa}Tv>H~9wA zfu8`K$flGT9?e{UOYK^Rqsp_|S%UIL7U93Y`!8z!MXi5HYN0eSbBV`(QxAo>(!Y|! zI;}g=Q%AD$=~^7-Boiv?u>3!$;FGh$(xroP?|VS`j0V0U-u~<){RbVzxNW%x zH3XBmu}%@04fG3F{}9q2_6cJlLxIb~-mgc0>kuRU8Or{v-Y_zASF;p96B#AD68jY9 zu3Log6Dop5-YNEpu*S>ik>~o!)#if^WsuW-_7IVo&jP~*hT2K)?-=zI&iD$#E<*K= z^K);B1lRnW!aZ7gaBk`q>_|yu>u@V<;aa=8sWb@1^YcG9wof04MGhLF@ z${AwG(6LXwqA(cv>7G^x5uDGm5~Y@G&eM5nV6vAXx9Pi=;S+#KHodhfCHl<~g%Ksv zO<9>+zm+4Q9oZD)LrUq&g*|C|US1b+N?9w@do8~m^_BSr<{^J0Z}1kb~~cB$7_2d`FM+k9gV-GmBZpnMD6Du6;|)Vo5>jVg9gss?>A4RAH( zxfVWuF~`RKiVziblwSQ78_~}$)HR|pI!=n=Ddo3HWmj%&6O^<$0*{!+Z25FrvNlzYB@f$?Vz(2;eT z9TSWJFSb}b07GW4Kk&ocnD(+XC06?^THUZ}?E3DVR8D+N&>$=mwh?48$f<@~=2j9L zp}o3XlujVOcrt-g>q}tQX8nUyWjJ`w+j^xava73C3Q=;_nPE+q&z>b%)|N26Pe(M8 zc1x!zb$N#je8GuxkhV$O-x2cWdq_5e584fq5{acqa)dO0nUpyW^%IG)@+z;!G={rH z-T8;}X}=PmsdZw-M2xbj$A#IoIW@y!Bd=X68?}^}1oFmSX&L8qMY>$2rPmYK&oF7(%#Kra zPfAjO&>HJX%o=4jcSgjD?VI!9j@#x|GkQ5$iDaXgu{I|xxV#`B+{@v@Y4{59)}d`( z@i}=O?H~b)m_I@vqynYl_-O6FL(Y(VprLvTXb3VTk+WvN_tflSw74+Vr(L;Ew$VOE z%uGdv;;Bzm-`eO98|cjMt3{1#K;8BYbz{$slz;ZB;7ni_eBy(TZeBc6Bcs*z(r-Vj z*u&VKJ=0;Z>kN;C4tIa&jMOGeNZyH#wh2@8pBY#NPrANl1eLU1g$VX@!vsOe-BHVi 
zD*LV&c_D1!LXnzGa}Pm-bt@lfhjw@_iHgipN$tp9A<1~XgwZP0$jQI@m@o#BnOV9u zoIzC>T?-ll@i)Ib2=_a#MY)@&AHY&!VkTL~`Y!!+aU#+WkE^cXNzU6?9kg}MR%6D2 zW!YH~?l{K~a>W!%L>5K-d!jBYr%vk|#mT&I^iS|^*gjlcU7irh}#;(4k<8v4&XIUcvoLq(X+PP<&% z0A3qgAk32YbNDk)otLH&kwd1^rm}{9`-;+^Iv>pg7nOBZ!Yy4>d=W~ej_m_4*I(G8 zZu8j;8N?b4MwHVfx(OK~AFuB7d=H8Y%}AY#`NGfYHSiHYQ%^6&$_!`jI_Z9ZMhaHp zzhfxV`kWjSam-6){b)@kK8{F*bW5AZXdI1jbt#8hL&Ev0;dpn6X}jLaepi{|%s*D% zB3(8aEeNKCg`RqD8dlpYmnK**h;HW+7qa57*-p|u_g0X8L!p|-pe{W^98dke+&?h3 z{vMSu_kB7IUE$8~crPn{@$=Dn=Sl6|+)#V&i6PHFrX~2b-cd&C2yxqQDmb6R-Z(Uc zL)$7%+GVH!jzl5MC+oKO_J}#2#iz2cftV;-@JP$rTS}qscmgU}_5k;pFk~jiNLCw@ zvk)SRbGD+(0(Db=cJu%=!nvN{s)199+JF}ljG{Z*dx}$6+w_k{Q%kw5?2m zAG>LoGxz#blPq4)Ed@aOxa|5~<)R@HXunq2^Xf)fKzm&!8-@EaVhc$Q>ObE(5`5tD zl8_PB-RXV##+GqCGOb!#z(;%AI@--#B3_mKnEHDJW@e>PTpQp;OpDkh;@I(B2_&Gt zGl(l*=6DjSFOBKcO|oqLhQw=Pyx4;t^rP*9pHO?Iq+K^Kf<}TNWb`hLqOh_$B1@a= z)lui-%X$~%J}m^r`zfN;kz{zviE z5i-+xMImy-fk zGrWLG$Zmpmv)rNJT90gBAMbxVPyZv?BmCLe_RprTsoC2PH!bAW2s`XH6fxFa{ZFxt zMbIsDNBqIn2i_N%HH1c9>S;4Q6|cKkk(&z7E7o?~e-srq>yG3K6o|jVmYw7BiL13@ zBo%48={n_LwM+}ko=ocW)i0OBJrf{+A-C)@^TdgEeX1(0s!=uJyP;Mi(G>T?DRM## z?C;|t;h~!~M}>5_&s3bk=W`~P31&(A%(os^IDgNZ zTw>&X6%uhD`X#H5y0Pm}2+C|D<)*WLNDA`&M)G+@yViw_@`w z?#j;ljx-1xJNCuF^VPzc!ZM`L{uN&hiNJ_0Svn+&2yIGTt}W z2KxyiJaItg(I&0VE%AJYu@d38Pjv>Ilx4bNb2cG`Y`1g0lep}j#^H1LYQ$AUwIqJp zch=tgqdUo;RzVC!RB>z`PX@Z_7OC*wQR6g}MD{du2S-Whyp5Aj7;cImlPtn0I+a3h zPPWuKmt4?{9(t@HqWLq9Z;6=;=lZNB-Y0*EK_()4>Pwnf0QmM>FJ(sH^}vteN1xtz zEFi3$-AWvg!3kmf;X}8#L!s3p?8CH_Caphh4LC+*WII$*`23WFu$d#p$Un5q9-&64 zR+z?oxC*7w94WZYi4A@?4w97(awYm6w)bo_Ss=FDW3hGWSO!YaAxBDQle|6$A;* zsFX02Hx^aeqPRDkkU2@$HR*hcL3HGr!+UHyK}O8^aGQARoMt-KO^+W75YZ6PHK4U%o%~y;RWupPy2>qTd&4X%G{6`xA_1P zui@*QZt-tXiig?oh6^Pxr?H9+$&iwhDF>ytmUBVxF2f4@$5l_f$yjl(9~TKpslkh-Vyq=ho7OoBQt<6J zUh-N7k{12=L4Ik#ThGWXe+XiPfHPIZ%q(a6(2qtRM_0Y&*hIDdO0(nUI z6D=r##?`CUqeQ|d7OwHw@sWl5QkZQ{)(o`2g_9C@d1uz}@_B7xNU8qxe%eor{h|F8 zN{IM?_gPl=IBELzX<&TEMMhdBLcONEp9A|3x=MUpvA(2N)u3^55|_)0Qn|~QTLF{E 
zIdm7W=m9#7CjrbRs)M3rU_vtJ=Iaxs`JvoUcLlSwjOu0BG%FMjTaSkvDsO!+S28jt zDp@OXengp&ScV&1E61J$SYX0SEwPU@M2p&$|<@ux2UbgEc~oM{f~dY{4U~tRdB!b Od;j+x0s`nCH~$7@I|zCJ literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin b/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin index fc528241a153f61d1e8fa210caa13bc95cd1c670..afcad7c65449afcb2942ccb2e2db2169ed4ffc23 100644 GIT binary patch delta 1002 zcmVjs{Lk;u- zAft2SEe1x3k?ZIxnig@KV4LceiY%E2A}|`F#FwW3C0|O24V0wR-o}80J!|EP{BR`kSd&qp_E!W z);Ig9!|ds@w=9ZZLq)AYOysu|Qmlj@p`2^Nth#k_AWBUMT z%}*;6o^D7rY6W9@LYR2vyIs83Rk6EQNLis@vBX{$I!S+20DLZ$Vy(oYNVcfJS=XUG zu7jf&@5U2aQ)*oy5u4M4kqUD%<%{l!F#VG5vP<6hShmlNy@@WEg zIEy{C!k&KyqT`1-R>4i4_-DFFqbS^e83T~T)7zTr9cI?`|77>bg&BiJs-HBE5hVmF z+4CpKI)Vi;;^cytA~|Q3|Cv78&VLGsCHx|W`53^;7mjLr@uH@Ngm~H^V+M=u-yeRRDLdv z>MnoN!@p;7?&N*iS1A5HZ$WI0nuRx|g%1G=U%&!fxc_%q*!Mum@EB$&QYql+`R-$e_6I5>2$B3i(bqJ!3s13fdFc1X|mIZ(G-`HJFNw~>KqEHbTh+cVa#QefkszZEI z_qDJEDMrb$X!XMZ88XwU4N8qagDj|Sf;9K%iFEyL@fG|qGm|;vcn4W1qsup@;!`rR zzhsL^J?9T0f@s~TV$lbJgeN|YJWWK4k-L-TVe0w_@_p@0F$j?h{~|qGMW2CIr+Y@t z>V)PF0vx!0lPy#YKO@38bb}SI6;oaH*vb+Fd~LX7Z&Jl;aWez&Ty~MW6suvKtiz|n YJe73yQ(X`P$Z74p=MaZ7Dvz4$JtiYUa?TP5 zHV|Z0vW0r36Af3MU{~BXL~+TpYZLSWAko2KdLrr~RiCgbbe_xnoT5dVY$d}z%#ehK zr4vq3JhOimB?Eu35Tx*YDvx@r#_Z|xY=sp_jaiNw7v4;ciB>ceyJagBqYH6j*$Y>7 z6Fk%gOVpu)1Na(R!ud4v7wMXetet!c6&EXwwOmR(kO~Xw#dU?1v{Bw)lbrFCBKh7} z%*|yAEV;n>CsHM+abb6bsbnwu;@bs^96uQ5>M07dg6)6U=dLIpb+19`K!UqW1+*rhiA; zx{Gt&!le&kUWY+u-@-#7pch0#xC?uVKFhq zlXt=%+LcXZz(Fsl4#gU$@c9xosQ=-+iG;_#P(^>-|8262A%s)jYfCBHw5YhI=)Waf zXxG@p8TRQRR+Q?(c*$rBt*)NCvJ;wMzSyS1wZVd@>%H=K5HY_&g&g(Wn)Wu`3M@Pq6`>y}y z^_zd^ZbE<9`eL2Y+Y=K9!5?i%5uo3!Fs!{D6Y2x9AL`99^v%}yr?Uok&+^7NmvTD; zz1-@zV+!&T$OcphO!t_*&4mM~95i3?J=Uh?q0mmLCzr(-_ktdOwT4xfPf z6-B1Nv3(UA-X=$*38k(Oh1j`STkl0_n%7yL=PD+Df80&$2c7idFk=SGx|*HtZ9l6B zO~C@}rl7tpj$w!bwBd}PVid6GVXJexKsCLac^*1BDY|CL+CgpO<^=Ek@f>hcks3y$ 
zGi-QV`-o37XUs$*4%}2OhxMTlf6k{K4H+sG_*C?)eK|(Tra+T!jYRW1p;{R-jY_#7 Y3Pcn$N)EMb8cUh_iZU5#2$SR-G*c?_y8r+H diff --git a/test_fixtures/masp_proofs/917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin b/test_fixtures/masp_proofs/917B7AD5FD4F2F0CB33924511A59181FA326C06FCA9A49B5A5C394C75942820E.bin deleted file mode 100644 index 4dde0ccf5f17bae502d207612170f73df7e81ba2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 15573 zcmeI3Wo%x{vZl?<7(?vO7&9|7bIf+k%*@OTU(9yQ%*@Po%*@OXJ7)Im+&LNDkw)4- z&iOG{J1s4h)T`C4D!qEtUEKu=0s?~h*Y~@D!ldHrgfkB(iejKkr8t2Rmyn4x&YXqO zb8J!MlYg&;2L0Da&>`=jkA1Wc>d%ph(_@;C=BUg>iF7_h<^VpY`?oeIcJSX*{<7s> z{BTIolSL$i?E6wEBzia!QLoVVM~1*^iVYm^jOh+FOf>jdS$KS|dW4?5i3A68hu((o zxtiu8NC;g;#At~HLX0Q(=W|FBJ!MVa3O0{`57T!b@L>bwcv*A6z8W5<7@Cp$S7?kuNj(Q|3wfa$Hyogat-?VG9vWW&h45?&|^bCNavel|uta zP38k4+c(!Tu(qZ-c+ciNd=;pq*9*7zyG!&o76Z=$gxm#go1n=jD+FCakNPxtxm}+# z5iK+3i?3`RXQ_g5_x!>EoHlgu5CFi2@Z)!fGcC{&u6G>&!tbuebz6f-<8znmJwMlT zmEMs66_}4rkUk=eH}KfAV=g$dft)q#%S-S8Rsfixn4u9JApF&cyX(hoeaPNUCVuK7 z4P}COd>?%;8KAQ!0|a8?uvX0K%O}%&4tnRbLLZ9{Y>#ky$s3IBZnpyi{=widp;e zOaPL-OBdJ2-37Uz-uoW)J3VwKG?xYxXujm?$nqsssw4oLMbpj_*C^XM%o^o_XFxr`IudjwLj{%_R9smlBiHKsbwr zR+&D`W9`9{^W64K@Tt2yzK#nDYs9#Lg)}x85uk;`eWY33+LBZ0D^Fmy$6)0t=v{j< zo~gi!3r4s$UG0*du%hEQ+cfHXe^U^XfUbfCxL+&x(&d9 z;GWyF2hhxHYuYH`$v-#^krRF|hIb86vL&&+XJMTHoT6LHFa!9CNSxlxU2FS%|Ov&(zm>^b0p>YQOm~%B_ccIdUEvY3k6`pD5~4Zt1TpiLKvsG)D-62Q>1$UYBnZ6!`JcL zB|!qfqwp4Fh;hU_!qYRf2Z)4@60(#DYh|s ztN7x18`cn$y0j*}CRvzEOBjg(0S;)h-;kq>SV>WzFdm4tm+UW>N>`7JM(5L$D*Puu zGfP^-i3Y~MG^w3QR5=xUuw&9}Xld7I<~7d4Iq_ZJg8<5O>oa$1fF~XoaPE>MpZAvW9k{vBt9bwb#P@j=b~N0 z7Qrh{p;8iX;d%uT0{=tdKNS8$;s3D|?!zN?(AL@4o)b?PX=+E#D`R1Q{GP?el{zBQ?#* zGF4a0oQVuD`fJeNp)ce8{rytPXIvvcCJn-boWp|F>r!BCZ2}SGd&A`GT90z!VXDBF z!C!0s*(3frssA~t|36Ra3;POS9@8J5aau~xpfIcZi zS%3R;Y+s3=B9kXRo{G={wDot|FDo$Tt63b5 zUR=Nxm`e&jb3hp`*59oI7A>cKTHzaq9UXG?v=W`arZ9= z|CoPf1pbqvK2ACwRe$e2rzd@HUG;`yrtfb8t4QI<1++nv^i2qB{EqOyEoxOzgEopw 
zdKApOwKI;j{1?a1T9lFSe(cabYqmSofTusA{vGOnru|QfTF8_veckjEp}<5f8F{x> zWo|*b1{=bKd;x<{X4?{wy1YsKdw^f^JiA1nAZWH2*1U5{wy<4g&Z*(qW_Fm z?ppds)W1Xh&$NFl>i?GW{JUJ`-|nf!`bd67`QOfo#)PY{uNJ3ciZ5SGq15?c+O%8V z;L45$EqGb36-RV;{}J^+Q_R1;{&l?z_o;}y(E0uNSKt4B#smJq@>*j-vCAIA@eEh_ z&3;$77+)n%)NfHr!9kQ7qE9!&z?-hZgIg|-rr~wx@EyJ06$sfYD^11w=%SsVSQRRz#Mwv|PWlw5e@M3BmQ8$zO}@(?$EFvDb-Y z_*xoEcmi8taTscMGZ6aeNS0QDK%+>0Nj@zh0*r=*mhG5ItEdS)Kg?`Cla7S4&4X+9 zRj}68<&xxGk1%;T&x0H&FA{6SkAl$Cf0I?1{5cVXM|&)KJSAgWkknn+|$5N@_R@%AKf{kP`mD z7}C=Dg>oqVjZX$8=YrC&C<%D2`#G936;GIH?Alkp-aI>B5>_GFm*6I5w}zeTg^S3h zHau}HOqdf<6rBBUYu6hRd-juj2;$r+(#hUBsd$b(bRXtD#AODgl67C-)6t)WGvA(w zIZ5yY@f1#H0;oO&T8#HZw@GdEW4yWrx^0|uZASSXlmRd&pA3{Nl|#$(61ltYi&?~( zEtE)Q-YN1(35LhUX+EDQsK*jtW&j$c@Qc_O5;_X?%6%g;KBld+i-d78f|_z2lF=v+ z(oZ%Yr6^ul*28}zyy`Ou>N=+boOVYyOAQXfLs2F^>o_L6Zbeh6YK+v8&ydW6H8()y;0XN!OEP_!#F49EA`72OJH=Ht9@otGl1g2*Bu z2Em-XbubQfvKFa*L~Q-5unr8}Qvjev#g;h9j$Jj7P`&JH?=nKMy)E$d?M4*;uIc);m_tAydg3dv;2|b zTnK(#GcFg7sF!@{Yv`dEwpvHb{KntO>J<#_=$~g_wh&b`u&MPv(eiyalr-??#&3en4!9uZ1I6Vd{W-R*R<33u82wIdOZN{{Zs?RVs@zlu#_rjdl)_Ow~ z@%5@te2gWF_YYY#nz}^SE9+CFq$HZ3!O98!wZNpPHVs{qr7Z z_jUndXN}#;qxL$?Mv0@*qr)(93vU|sd5}%}XAFR;QL;XL9ONHbZVjgF7rCjK*6hB&z%mlfwX~4PM*aMpmz3G^ z6{z7c$*_WDg3TJIv#O<1O9@zWEZHaJpu3H#RLUQIUjp^DbcD9AmE^yQcXprBeg}J6 z@Ta4`s@81oJN{pRZ&Gd(-wnCYBwmvNB{-4gu6s`4a5oj|(7XIZhXlUm3z z|CFpGH96WDQ%3M?YFX^PXkpk_S22M~BTP-9HO+5l%&xuF76;QNn`hP_fX52#8kw!ddO`Fi+?mvX}Yd1s5Z_nb~g0?y+p4*jwKrLq& z8V+9QabCob6=2r+P*OQXhSA8a7s*>7^(t0mFTw*w{k|~{L`_41?(`mSO;^D-hV*-> z3^fuMaZK())lK)InA>1lW|SJ0w=Yo&+_^3q`jBUU=%oD$Zn8d664PKv1`~IsD49Hp z40I=D)s47rfL=S~Lj2-zGdhPQ`n3{iyCM>_Z-4Zp?IM{1wGdBBHnHuwendQyPOLHWNh}h9ztnVN2mCf2r1V zsKRkVeahMp1bi@Y60k^I7C+Fk0~~QRf+l%6C#b#3yx3=2a2d<9upvhAXzmpi{erqz zd^RHSj%ZHBq-nMz0~q2Sb=3#ym7NtS(omt-+GQ&{);t@4t;_80_KxS z=!}UmU`Z@7vD%|s1pN}#PHfEAON#oa^?j0uhB}e>P|z)K%8ipVa`xu($M|CSjfD8U zxqi(i`~h!h0glh(a~at-P_44on#O?mCtCSxF0P>l*G?SkXd zUCnPGYq=s2_ujm+lj*q{OrAp#kv$Gl&^75Pn_I$7#e{Ota)Fp#^DrGlM^TcSCukY!SB$uzM6 
zJ3G13)wZ3|RqN_dAh2Q-z3_3|Z+S&&5BhGJW?f2>ZGReFd1CGPPsb^O`|&@^iwW&r(*$Aut9Ow?fyWVooMt3~Yd; zTMtbInbkdE6x`8*K!zi6tlOX{*d2vZUbVVEC(X2=Vc&xcMJMZ*jw+&&^3TVE*iqlE)E>pnUTaQ)Y< zO9W*?8tZ9noW5y9&|;kd@kD>-S~Pat=K`tk+I zJ$IL(@nAPQSKm_d$eE~Sd}qB#nXX(6%B|=QUmyzA`qGZfMH5{ny3S4qlDSy$OL_R& z`-&BTi@_THa1)%{1h_(-5pHp$dSrNj6;3)RCpC+bVTi61rzD8DAvDRO3M@)NeF@E% zpj^6=jx?^-eQj@K^d#$9YkerkpjGF2eG)@s9RvdUVD7ya&&GpMGiDi4DAF*)SzXQh z-5rP>ZvG7m;>4#FA>$jj*gFQN@N4tdCqcmxN?~3asgMHfH zYzfrM%IF(;TY;O$6DG{_Jb|tzLF7e6TbuRd*rV?JO*}4P6v7jq9>`l)Qb5m#l&i0>z@e!=bU3Pao(3o(dvgrTXAU zrneaKlgg7?mx0O}J>jE?Xk?uqeLY)Rl@b4bCvD62e9s{Lz7pxGvo~b2k}&q6Y&wzN zL#SqH1w&tYHZl~-AMPsE+v+}ZVp*2S%0`y8WiO`IbH1uge6VWq<}GplOK%WegZ^oM zlXaTNMZYnzNgS-sC-u8TAwS_L+RY;!2n8G~V`PIuZH$Ao%ayt9*Mojl@BC5Pkik@B zJI7ui!dMjwX=4%NWvX2Z*N=46txL0)T5h>&F zpTA>3>>MvxpmihjrTgYxG%t>#TDQ5|#1?ms`+Dwko03!7>+uDhc{{sIY-Lwhf4tFq z0rJ5D&w@D5xUS@J4CEzBr*?A%+L}fEDgi}8G9Au%+L$mzVK@&{n%+s?p?8v{_&g;; z=4%z^J_^I=-OmR8Chh^#iezYF1ZzEnZTu=F^Y$TMu)Od{67O9OcBzW7Oo1&Cnq7@VlW8S!k zxmY@Q$;188FBw%nsGpHS!=imw+b|-wK1+uqFajAq;5*KNGh~$ARhp`IUkbpM4za_a z+|vHckxL6)SbtHGjBhUzu|7x59 z`~f2x!$SEzQlT`!smQ~3F(WbW?Ov;g39$-ud1}SatfnS4gff#A7W8;=R0Mz#&4a(# zLrF0(TRsd`v8<4}FBq`N#LJ?#C;CK!pF29c26`n@;bQ1~+dko(AMcbRpiXjhI6B8! zw{0-H;V^{QQ;D+==qpK0n;rx~$Hq5^I{TdcgGoT;-82(QuM3_bGWq>L~tsuCIeh%@jpXkP$4*6SOdb=Yo8$wg4Ni4Q7!j~aYks<6PlqYw^ z5HN}qR*&Sl(@?Yp<@+iJ6sCkTiu@V%EpV(u)vnD6F?Fb51^qC(h+&6IOC(OWz`%=vutp^nRT15@PDFrqmIWdY>t34>-#VXHt}h zr9>X7_V*@92bT^7Mk3OhZTEXqCMdz;%gGhEZflW%4x=1;M5se?wk@r*mGkxOpNZdl z8#`gdn=4@Yfc#yZ(chI9 z#at?l+Ij7dn&vIGH*LS9j2>$m2*f?>2xQ(a167xiyi%yb?6n(>X)F#u$_BGT6m5H9 z2Dh;X43DN3*g>6W=D`z|S>Np#whldaL#33q1=1P03SC4=c}gz}JmI(|VT3n(8z6-m z6)>dv-9L0R+~OVu%=Aw|cp~s9v&}0fegComrM!nuu<=5=I|G*Z7hK-tm0Gl6NN7!b zR@^c(pz!W|)+p1EVYH!j1F*wZ5}RV`>)Nbou*hpYE+Dq?fkH)9FP;gVCK&S(|W z=pK0tq#sNVx6aSRb5F~8}@wZU__6BD0=t82H4yQu|^zU7reQ)!1q%;lST?`@?A-xLih$S4> zD=>c1XQ+++oud+$tFKn>wQm9ymfC! 
z69>!Y2RHj41V*t+_j8z;$%Nrg9}(f~w04w@Hv7tImtUumt`=CtGok!jp}P7yDCv@B zplcgtwRi8k=?C;@DP}=c5rmW1J=PI_^sToTOosEdy}ma@P+}3t1L-hB4y6q*)HUSq z3?1b!y3b&R|} z#s<%7w6|n(LE6oMDy}dVDO_PYBTvKgTs!#>Zf)j6ya>z6(7igqYx4bU{#}SSrt&(0 zw_EG<)%-2@gm1n+v8%@girsc%1|;USaSY8c3$+C*o(GpAkdY1N?Vu~p9GX*P>t+Ir zkN$+oi*w~PsY>GJInet^=;k1_J;1V7^$;rc&F7htU&UG zp_rphB15t@>F07HIhPnNi-8c88+_btdtLQ7caOyx^y@Z2fGm`@R3{R%g1*JWFMAt# zD<;@-RVmy_Y;f-DW2ajS60}2bdty)49{yRQnToQo>R7_5VQ2^K8@+)l4wnhMcP5S= zNJoK&ee-f}4L(vse36d*)PTw|@MVTBp9i6UI*wr`kHfONilXoy7Z>*B&<|(&u_uBz z7kZwLH;xIGd%$`WZ{*NHkA0;t7`CNpx)3_qqHXE)x%?YB;rqy!#O+h4fq}B7gJj|m zJ8z}o140nTtgPZIvZOoJ6ED;{7X%J8z_$^q5oDCt=ng^a{@T7e(Z@oIXU_cQ52WhZ zD!evh;jz{36K`15(jLb8CApA4lNIyA1(vy_?oR~!G36oMjqx!riI%_Z91avpV&04m za}}=)|E$gx!%-E+(gW)s8IYm)5zw}$It|8SVl~MLn`j$#zyj;yK4#4!nCl2uvke^X zZqU}Y{?JI6)7srYewMV8yEH+)XW7p4q0x;?)2cL|xK{p5nuS-Z2rfr8iQchrZ zPBlJ?MTN|<_tKwZ3q;xAEqBrqttw%d9a((C&NihnrT`$UY*9xlISy#z_Lf>QN^_zJ<)2V(c9Yjn? z@fs~lCS+SNe-%0QwfLY=4-6_~r9^)z!(tDJr{C$nTZS zb^U^_YwuG6eUesEPN-0CC}Jv8&}_0$Z^XHpOEc!F%(FA}EndiP&@ z8FMA_kFksOj)0(Y=XxwXN}WV%9Z`le{)j&ZNi|Q?CK-h)6%A85Z6_5jFr@M5e8vy7 znwacKYm diff --git a/test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin b/test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin new file mode 100644 index 0000000000000000000000000000000000000000..094103100a780895ed109ac82128ed1b142bfc78 GIT binary patch literal 25031 zcmeI5bx>W~malOMuEE*3yIXL#KnNOKH}38s5ZqmYySuvtLU4C?**GLfpY%PvuDrAi2j9L%wUNL(rQj1!( zxYLahOqS1fZ0*(D(S*M%_D-P?Vi4cpR|o%{PJVUttAPg%Y2PgEe;lde%&Jh}{rs^= zNz-H2ZZMyhDk@f#R-@+)V_G2zw~`FoKzGICDC# zB|d*C7h*pv{?v;`oXSH&gQvuvmMsYj5LJl)Z;wQzJ$#>|J3-$xKeF|>>;Yh=ZmkHH zfkCGC7VS8a=^dZM!D$+<15S86PC2FhV2JzFNUdN7c<;_5vWP%epT!mi5tlS9h}vz< zTfa*?p>D`kM>h=BAm+Qv8|)?o4f51)1m?+7$}P0=DShRkDOj2qP}K);*c{FN0GkJL zE_ZtDmb5W*8w&ok(}y^F4(q)}*ReKKB_TRqC}t!Q3eh1HDryL>|PUd13`|`~a0vW=x}-+~#zZ;;C~Y zye|928(oY-%4VIH~S+k 
z@Il@uT)+HpF2*%${m9>^K+9cc%h}3L$bd4ey9Q`)F_ue&*OSA}xbh!)s+Q;H5C9wi z2tx@&BSt{*gArfH$yIIO)_~oMdzlQ#S>Vg$7HYUp*Tj;#z^e1#-GVgH^2h zT}jWrd`%M;bi%p=WE_%0^|UJjOX*wpr?%j2l1r>Y-r_u9i{k z#mKR8-AKWzy&EZVg100BMcZ z7mtxb&|ufm5Hx_n1+F$Fq(Qv-P$ts(1#496dQE0O{_z(~JGiylil(T^hV2RyRZ`A^ zzlMQGn*xc1Wc>Av_K4N!_!A!>m|a7wL?7<1dgq7J)cVAyLpL`99cNUIknaZOvacaX z08QL(gN>3_7Cg$a{2w#j`^yi(uUixGO@$VmF>}3&)B(KaedfZ_dxLF$xSJ(51+EAS zHeK@ixV_j}c=%nN^+*7zXz$`wIe2;CkxC+z)8wjzd6L3@`7CTABCs_+(pn#oar}Af zuElif{atxmeTl+tok^u2864EWl|LpF-_({JfPP|K(^?r{@z!yGlK5L8f{VYh4VlFa zJI4s%5W`B24e+Lb%<<7o!91dFDhY;-Bs?o&s3zfldDV<1o;B%W>pj3Nf*m{F1nq~3 z8WHL?LMZ*Kj(0xT6$2C_Q%5`1ZkXf%U-PP*&R}r`Y_$(3BSgHXC$#zj6goKB?1BxC zjTYWk0EGyRF*!jo;Qn;L$NNt_i-BS1Y>Fak=y)@?NcDrZ3;;Qkk)k?((bINDgYgAk z$lS1>BW9;0bo?%*RqOPcO$GohHB1$Lh~gMp6TmuS6t3vK7Gz7xy1FG8gJHZat0^qt zyMId`s=%}Xn7_TeA}=!Ir)c!u)}k+j!*DSR1dp$103VS-UsGhf-mOZdrs6jAZC8|w4-K)=||9eV%`S=SL0R_=1A|ZgS|5*Phe}0oL}Ig zzbRoMnVgS*GFdfOj~ghQ66}S-FBE>E@V_vH^>Cwk)oM_uNsQya_c8-xE@L+!TJ#En z@YgePzkIdSf_|p&v;7-UczW&-yaY8#@c_Lw)iiDb*2~;@2LH{3SqIs2pYb znZl~z1})SeCRD7Pr6ca8+exdLSh5!GhaJU1!Ed^0NPH6r0giSR5YVt!_z!+Nf2&OI) z3!{fB58$mM-N%8uyslsoK4>CFYU>l%8R%O>wslmUW$1bcdm-3oR6X0j5rtnl5|8Zo zM1&@jOJb`{io}Z|@uEol zk5wc%@se9@=xv+4ti-GMHDxk=-TGF=*00erQTx+I&3ah@*aXV(cv3o}1>w-A5x56V zbWOw?o}XyGZhzy`7U;qv2lQlp01qNHEHX{Io(b${f^ra4aUQcRF4tABOwLixRtCK3 zXILKar${Cy0qr|3A$6sv!QghRx78=dGALkMGTOsEqw3lIPp7aR>ow}^WP|tea(3zF z!&!DW2dhBoHkmY7|Bz{qFMJ8PY1S80b+wF%Q2%E|BIQt!zbq&Zeffz&djalv@e8+u zCvVIG9EjQ%2q?kBj%vwj{`MA<%8U#pW=7U=U%t*yU(g>yKg&#$ZV0Ro4dQnc_E$~_ zAS*G`;eds3q5aTJ=*mrcZaOYG$)`9$`G^4p^O6g{k&@}tfi(oed4*Crt;Ej^*T#xM}oGk8Q_Za^V+6;zg18 z`zjK@r7$dUMW#?VOHjf{JKBC_qHs3{1`&KRj}m{OMRjG8pd3J-3A9*n#LD77ClMcC z>E0=Z0YbBt820g$q1$VI8s$fRM%A#2XFI9sRaak3S z7k`5`nP&Mpq`ypI`(XsRlceQ?yi5Z_G!E^uI3ThA?A5a zXe$3e=U4%<$vDmBSg4nvcw(3wSER^GF8o5_7YhF?Q+P8oTi-%@&xxe!`^IvC3S+4R zjBCL)flwHl708ytSrg?MRnPWsMB(G+cQdm4!QIwjq(7#nK%@0NIGDoR4);Hvd3CP!xDs#Y#ZGg z9rNBx(-`u;Z}cRzga-9b`uXznNk;M-Aak%9WlR 
zu~zhWmCcqxb0zXPNsi;~E858bXq?orW~gm<(HS0O+0E84IV;q>>G)^P@jTH7G3SBA z0D+b>l~+4m@eCiRZwao;q`48Dmc{ISuZIz(B7Va7XaKH!G;0v)VfOY8*F{hj=V>`S zF4ROdrK{JLgxjg<6&V0eQIY~0Zzs+vZLX?jHN@LY!|0lRq~&S#1k5_hq?YIdp!n(J zq_;i{CAhr4W(Yr5{9v#S+fBdT?NA${z>mC32q5Prr42lM`zXNT7vW93;zXdf1di2rgA&-FG9Q(yKs^kB|_l0mTgnJ>}|Bi5< zbw5`ATfMW;=wD6L#hd{D#&bd7o!R4Y|f^`Mm(F3$O-Yue_-I{?gnLePp^oUkx&?xWTV)FoM=m@c%`6D|603zZF7Xeczx3I8F z8N$_Ypy{o6)_IH?bEIkI@zLo#Gk`bKX;iYverjdRb2H-9a-F=ILHGxIMJyjxlB0>c zAZ*v4O!+P02GIV4KEGc5cZv18O;`U+L^h2LqEPePNF~PfIQ^`!1@daHkW({R`i-l_WA9PY0Zj{i*{IHWDERtUAWwpq=%BkAzQs6Q9v#*TL=)>|KZ z2!NN{K1oR%s360wEhUT>@%{_Jsqad$2(0!RvARj#0S-W5aPi7}LoUPBr-xq^UrWoJ zV+KnUuO^{yi(*i*71I)slXtd<$K+BHl2_t@2K<;sM`nepho+!0Xf3z4o5_R#a7o8N zV{eo1k7uI)J zcJf(E8lmuMnu7kTgmd_(J+1#;!u?LH`)9%_t|JVAO0KXy=^z?k(VI=s&C0Cu*AF7A zdaW_@#x*e8{yD0B$>CmdxPQqUZZm~3>PEu8O)l4+#}8LJC3nGgIoY*t_^!IcChW#c z@BhYLeB5?~C3{z+RzRP!2)UTMh)ZW3aX2=`QhZ2NkFoeE5Ky7!pq;u;4i*J|(hBw( zSS>Baj=xrlCr;|9aSDe$A`Nh@GvkivR>}#16CHn-$*B!fRbS|5MoC8*qPB_A7A^`P zvV>LAyn9oC(E{HlH=vd0BNj7uvaJ%}2}K(W_Y(^TK!9+TK2HzQ*1h*oJAS9E{d8!i z4*jK((i_ULNw!H@7~;>d;V%RJyJ{r{*80I|R~80k>w0G8_Yd7rc9D%A4_R!)4uT94 z^{TSUsKC~R!$!WSo3H#%c2gU8kNpd-B79n$)xs1|ra(q+)%*|c*6FT}3X7J82k#4_ z>4c|If7cI$rv_QelGo{*Fk2yn;MrsN0b+zCR4=uINXRtdCsJa&>pv0xV*WL+0K^|* zzoIIj%y)X)|8)iay?_z;@9xk4r_x36mu4~-dWM?qPi_qZBB(Ka=H|Ce60`;oUgc-K z5V><}=zUh_47-4=bK!Xwd|@G@o%S9nD!|1oa{-rkMvWpE3^uKLaP|Y&lYosr?RTWT zm2&N_=1i)s9!Mt48aH|fSaA!fZd&cS5qj@lpND<1C|}EQnU9_GlJyJ0`kJn*+c@jm zZJ#O)%3ZzG)u-w+_(JP?1lTmuWXD4sn=dbrt#d1LR&0;x$u*VHGT{M`@1tkhnJ{Th z+r^}(2XL(8m@?*y?--E1JaFpYFStu-k`Aew$IrA|tR}y;OzKvn8vGcj?jnd~6(WS) zIF}i2fc#ijW6WBx4R!?$fPWR0vi)#pXh1gnZR#VVxE}aTM349^(ahpE@6f}TA+K%< zjhtY9sk71pWg=(nGzLvKs0=J4@+ghC*m06{PB&jY8TyN#?olJEkZodtjlV$Ej6zDCCkf!mAxk z2nL#T4n?~b8Es%Vv$-Z-$f7^1MxoItd~HuSS}7}V+e<;-cv#W~eT*~6btRb>LRFP? 
z0?Zp_?8y}A_SP*D#;8k)P@>%ymKnW~`&yKmGg2$M0X-$YD$AcTPH-Vea{5)wwH|ls zo6~ACA+_8GSInXOly&4l{3R`-%W?PmSEhJkkv|Xv>WzFwC4o}FkU^W|GM&$XU!N;PuGar29Eh{=80Rgv(veh@Y=Ecn zgvi$ya0B_=dT%1(`zo@jIONx7kz1@At_3kRuZGR*(60Q=Z6?EvIhBsS$#grk!e`XIw1r_2VC&!}G!Wi2IX^fbXSh93& zgV&am(+LTc05E^gVs{E&qguCnsDVUusWXN^p>U8>#l*4(w6KWY{k6dTh$F|F$5f=l=NJc zo#^-O2#(et$SgQ%W&JWH4w{+d`|LjU$~TIxqk>`=AX+xBk9U$6GXjRvoS2OdTy1nb zQpS$*hh9C_49<HruIL-wwgcZwvCn9|lbBkzazdKu_nTL>idHZlBg zjq+H~l+zAh7@xWG>pikZrp$&!ThUNrw1Bc=pxOuwik}hnswP_D4(vOe2FRt|?JIhw z%vW2!n^+0&n7QT+p`Ot>HF{rQ3|pK-0Q@m@S6o6lFHdz;UE3AlWE=_bMs_(-X4N@G7D=r2il&+l zA=6b^-7bl)`5U9|YORw^kI$`B`h{<}Bpm6&nds6+r{%JPn)(+7CIYM@Jx*9K05)ll zT8h@^*9*hegm3e;Gh;f*Z9P1f7{#>rC%E{=mIBz*UoDQwMh`G+Mdx|#xUj3Ad-?n{ zF4W?7)9qoi;S~sdUAT#1g``}<453U%))thP{|U@vWRmw7klFf&$R& z7MKC80jI9N4pW)qmMx1aMJE8MwC5stBU;ml-J&$Xykrp_BAIsZVW*K8Ux(+^<|+Sr zgc3YhHx?(YG7%;A+bf_Jg>&KCY}weox6}olG&g&52xEEGjqDK#4W;&9CajZQuU}h_ zXe*lJe1d~npmnpKhueUy^RX7nZ6mTQ`2S&DvSd1@NmRpfI~ zwE3|R9}MQ&`d68D5gM?oOf7~~{fpw$d@ND+p9rKAiNI;4F&|!cnmcq@{E!~c`3(Pc zy({Zror`$Nh%;lCpGvAKxY5K%S(=(6ZC9xb&L_L(i zTGH%Il2kJMV~zx?%f^v(me+QhL~=hyE4_afknQ_tc6SX$JtE?r}Ys z`024jY7TVewvdzzn*QN( z@ejV`NcFYZ_h|A96Jk>>Jjh18H$em~C^Nk*xLz>RYJ`lBV{72{-r`CD1Vn1zoL~h> zb7}UeDGK@-U7OM;ieb)?$3CZuC%}HpQbUGMx-t%`>5)k0uH%Ncc8dD6q4v-1BBe>+?^TL7lODp4*k^(~7q7gM2VC597{^I)X( z3WKOtfCmm4cqgBI`?(APm&*14x?rjOX`qfMl0z5nqJ=iD$Jl6*8wJ8;>oq5~`c!e7 zrhhQ$e>snQ&b@zE_40ScD}Bcbupg20#wqC-9a(J)MSMNDg?k*gFip0-POD=}{ikdD zm5!F}P6MoTzB2JQPt^}Z0y(q|)GH()8DH@}W_o??ohJ^bJ(FWjcIT)EAJGBY+JAy&c1G1#S zGbTar{99e#EHjPR9NGlQjOrh= z9y9B=buugw)4;#Ep#{?*;N*`0Yi*XDc~bZ@Baw8md?%alVA-TZx91!;ump5)*hp%- zh|q;lB}lW6oGW}QuLlt+GB=y>pvB`LD)AkbB#yWjA^k=2iLZ2YxqgPY?GfWly`^ ztNs*mTCbNLW9w$jR|;racLmx3UENN(~z9%I~(Il8#H*xIGC zK6E?r5XbMX5TDS8dBOA(FbzMgcwAl+w6Dkpi*|APbulldI9Z$`qM?u<=r3Xg=vD_K z@=dRbrp;wDJFS5Y##)>|?CVkOQ@7wo+3$GKKcqzmR?p-=npt4w+Ejm1UtS-?2zot_ zt{N&pE#FrOjNVjI-RWPiX@b)pcG{rGl4$6N7VtrhM&8jWXO+d6HeN8f+oa4dR+IA6 zzcsf+i8=X&Hk5Yr1Y%QxMtB2q9A>V9j!8|&h}4H;+{Z;u 
zb{y)Ew0z7)f3D9J8t=}$n|HeQ*2I41H%5kNZlU0MZ~s^>k!cn{?~FwOnMI$q@0R(@-$rVwx7sKIQ zP3+X8!IF(KJ0;T%D1Hvde2pG4f57CwJfo5=l|XOjl@)ZgQOZL|;FV+<7GzxbfmCP6 z`<3U-r(Db9{ccwg!Qnw!ZzKZ$MpPcJ05W&Kx0Gc`shX{$CvV_7CEIp2{A)P+3&Ot0 zsLE#?TP%#k7toHABeHJ7*9}Guky-0x7$cY^r3OMB$A)IYpJ|dERMm?@L>1fin)eq&#w5J2M< zZPn!}lX&!mOhK+ET+UCUygtmJkuCvfd=_D6g@hGCBo{u9Q$-he7#82tX3=U543I9n z^e|zMUfV~Mw~-9ol_JvLDZI8aA9U2TDbh~R9~xEB=E%H?Q`QL)mBkkM2H71LySDkNpu;{H9^Gt^;5 zI39x_Co$OC+6WvW@}uiC{y~%%tb6CdjPok3XWtI#5t8`E>JV#VXgcs!2hTji<~plH z5K!F##v!r5Kws6`e}vr29e*tqRs&PeNMrSS$ea5PhQv%xZ@3(o5dW(M zb!iK{g`e5YD(It+%!#Z%HNN4j5y==-_HYurjk8w3KWYyz+iy}dzoxluguW}6;ny@> zoBnPb+vi?PycLRd`s1v60wKKNs|wno6wp_+a;dp-3`qiF2-`o5efr^-_ovPfz=EKy1fMxZR9uD$Rwcahj2mVB=R(( zbH(Ur`ch!yP6xGvmQ~jK+zlBmUYr6vg>J5Hn3UC2I_w|!=ktKt$h+5b9YWH4h~_>y zkTyRALUPwpOQ|MK`k9h;y(L3)fO+BQobL1e12owF)}!iYq$e1CYfCZTa*x1TWgK*o z;6AjXC!nZw!6@&Z)ZbwDgL7+5-Hpo=@4NPKDY$P;JgKsgW-$B4y)txl19yy|8p6#= zHPt1Svv@VVwVBHc45oaXt+-X-h104a-E`Z}Q89uiR|gZ*$dp6J)*8$|We1rkG^O4@TV|ZT~nGA+oCP1;G@kgMN@L zJD8{JWR28dovauFrtU9Rfg+YEC?M^&Tt{Mb=K+(Yw(AG|0^9h9aDU}JN1jNc2gQ4C1$)PW!4Gd5 zVW7iHuuW9e)xAvw>(7#ogokM0^1q-9BT*HWVN|^atOnsm#G@ku_wC&bYPQ?4@x$*q zA5>b=3OpbEc?K7Y+2Iu{x9A|Td`}^`(9w%#D+R*jiMB4RS2Dg2MSS-2W5{V}Ia_Zn z=0C2WjUNvnM!OALVV(Vq-QImkOpX1Tb-OR*RaSmo>`%H{ciA-z-_ZbbL72w^n+<5( z8IArq#fossY^NIQ@zhJFp`Mbydv@j`z&r)22l~DsxS$#zMjy>Zubo;0FWboaJV+WS z;Vw9aSlpP$8@DMLswn1c23v@bgao%~W<}XsLVTK8aegcU;JA}$fZ2lNAh)kHkMDD< zFip6}ev>?FGeb(bnONS)T^7?OJuj(d*XX;49wYLDH4+zWKX>})rgti~lqE}&NY!LU zGe#1R;c69l2P<>EKy*!i!(?0n2d5(?;$_vHbnG%i|D(JO4-e^^`?NzaBM#rS9%9Pt zTT<$42n438^Mp@|93k_Nlj#`J>m3Nn%S)F^q}*DZcDTwKetsvo*-E(-sHjHs-NJ9$ z7I9b+faAxk-$8y=;e?M@!rBp^A4L?F+b1FgO1vajRYGeQ&tkju=pbf`52+C|iO?nFufe}T>CF#Icf{;};5LxbDDC9=6ycA~ z%m1)DR;Zu)^y7+#SDHhHM3eLtsmLa09EP@#V$unR>&)r+~I+^&Z*bi4cQqcZV| z!*^EWi=#Z$bCGzL+cyN~%<(oZ$mYG}t=pRDKp-;A+d}Rw!fBamsCi%onApT3JcxS0 zn5$8pxL2Dm#6x`vrKy%FxrG0erg8BC9;4z(TFLKcy@zm20V5T;-{bf z0rLDY!xG2Zvxc?6PoEi46>PQ-pBSc`ZzEtS_2wjOYX>_pzx0Qe6c<9KuhN3{o-)ec 
z)oR?)gxbc30Voowa|<569Jx@KBfIG)_Qmzf=f5Y}$dQ8<-!Bd@F%yML*>cm^eFS^F z8ILl345Ve>ymMhEWswgP1PE4F%SWj+V}&V~(=3z`^@PDRAEZ)peWQdpjp~kfd>=NM zZPJ;@;GZc(5fW>I0l|qCImw6~muWE$BE>UeTU0N(UzpdGe0P?V%&R_%xNFgPT4^p^ zJivFJM3}5(zRb6{FoaKWmh0itPm$ShMb!_Tr;Va#Jw?6}F@A$K232^QrkIIY_4QsJ zEYxR~KgvSksP9ZlvgVYvtBAyx2TE|wqoReHz~Z8)E<-;fD2E>8EJcign|d+zBTk+^ z%yp)Yv}hgrE40A1JWh##Mxm##@=1WdbIIKhnl3pSMxBGi<4-)Uqs5Dok~1_>og`J! zL923=8Gn#)#Ql!dWJuJU?(FB9$Uz{jsh^#L+)Ol)py%W{eooM}E5}2o z_<9@LqEgCC-=ow29_-h{2Vnjj3_cf~g8cLSb1A@oT1xQmPeJ(Q@@ppJS9>np`pfV6 Yx6j}GyTc!+KWFv+m;Lqmb9t421Kf{AI{*Lx literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin b/test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin new file mode 100644 index 0000000000000000000000000000000000000000..f456d94d7d508b81413aada12461d083d9b29c0c GIT binary patch literal 18792 zcmeI4Wo%qqvbIezwHagXnAwh*V#b)65;HS1GsMggLmS%6%*@Qp7~6@NV)*9FOs?*g zu5^B!`SD$yEUCM?d#}B=s`}NV(yCn>5&{AO`ya1g1F|olTYgJSZG_S9jYZD!z5vOY z09LEv03O@TGRuCI9VO(yXF^7f_y1hGk#M!jU53SeNe~I($xFAA&S5Ca3_>-im)3#) zwd5bR%u4_fC2A5(M#8!O5sgd_Px8w%+})93d=>Quo_FSSyE--oAVwBI$Ww>Zoi~x_ zVD8XUKMK+^7eztp1e0PU6$rooc6T<1BH3Nq=&fM$0DL=r3jql0rzGI)m-kT>yv?cD zNz}{EK#j{zj%i@iSd)d8Y2YV}uHoV*F$uz+fEa%in6^wXFLrDlx>_jfy7r)TL}-AM z5)|5Vyk^fh-ipJ;KhAJZQW^hgTk?j`wCQ1C_aK>7G!O=Q+Y4;4oy?F`C`Z_LV4v~J zt?p-6v)pHk7W-iE$L5#;j0r7x%JO`f$qSiB>GN?BZ3s4aiG?j>4AuQxo7jW{)=g5h zAuES^i0Z6+WR9<{rBJPna{$k#JwjF3yHEukJB`v*gFA{0B##b01ObgA@VTlaH8@ zS%hhe^fZYQalK4E6x^1Y??e0>Sb@>Ea`dgx^Oc$`qBY8;y~nb(&Dd{}w;iDqk>zUU z-SB}0;zV+hvr2X>FdYq4J_?{7A6NIg($r_MiK^Cu?q*0JuhLI+7AbUqm`Rwod$Bnm z??IA7K{LP{4hq&L$xE@43QehR}G@Im4iX7kWCF%G2|rI1cp12nbBFts68EOKlFj`P}w)ZTsEq5UTXLT zMeMzKCP1m4r5{(vT?HUW?|qNDoo>c&^ydcD7`~JmsPe^?YGgp0MbnN z9g7_3XHlKk6)q-+qyFjp1ToK#@3i_s6gqD~YyyohO_n~^K)GnmDH#D#^W*uTz^7pD z)sQcjtn$L@82AhK$PL4G3_ux^iQ@VIkslq52Gc7%(D`3}PMH08N5|({R=rKH)ocLN 
zR>xA|gDQ!qH34oj#^Q-QX+yWBZ);dVFc>D;v6#XEdGhhiO8V=!DfF!f<$ivI*!q%x zf1qQMD}n4KF-VJr3Umg6sYMB}iGk`6^`5#)Tb`v)u&^4TKDXcuIc)iMAt3DWctrxmyPtmS^*xBg( zOkc+{mqbY*pTbL!A;J0OIq)`*2CA-hfAQ^(MPLTf=>AsKg|=+gsxFaz|uXeMSA)!)WMb0 zoQGivSCpVAl~zg6h36SU82UGbe^dB3h5ye|xC0A78-kPK+5sJpYBc?B1Jn{smpiFD zI8}?VWE@kc3yg9^u%jR@d>h$tJ7XJ1u$lH;9yEnsGl8Xx{GQQMjT;zU7z|Ok5r_%w zj!d)W)fv$NoqA)Teu$c8A83AlUbcn(15^JPf8@kaA4q`s&l8zh&JLDz0Hz+)&n-l+ z^SreIqUjre@S3iaw@?yK?Rr?Xt)2T{Q-4$IH?@9K>wlwG z{EMhp$BtU_plI8j9!>heay&ztocHH>J};A+u~8mM;LnWF8AXVSw5_6=lI8Q|N6*u{ zy^7erD_P0U1Uxh0R6rs^oR5J6IiVHU&~RJ=W0%xcQ*q`yfw>SLG=a`n-qgUP8L{X6 z^80fh4Tb~t3xg{oV`cY#k?N zc?mqp`)sxu1L?bJr0zx-%-Gsg(Ur#C85Z4jI|xVjr!gcRwN&SM<$wYqeTg|EMB~MR zT=1FraGBtn(kMCD%)yqKw&y-Iep_J7Pu$iu!a!~XQH9qi{^yiK@(>K*%J;pC)N_wl zpF}mfeU6?8ZB8lPY_HR1^pVhHrhi*$Wz4J3g6oc`cmp);iMaouFZ51Wb8a zsh#9W0ogcIl~FH8(VOQY26?L?T3SvEGcsaN3(`*-yi07_5P`4kv;IRaH5veyj(bIp`#JTU$f4mmZr=-0A;MZwi?&O0dzF5!# zn_RvgQ+@Z4cS7}}xmLO_ZAHOgr|cbH0A$mNdS85h2ZSDb>rucoyo-MAje2{WD&5PO z6atg|xlJ#VD)tMP7gNcw8hy*pY?T6Y-?`Ir~)*V85wH)g0$1I zY`wp}Kme3}u9+6y2t+^np8*hps+s1GOQ)X*@Rf%aV649~d|rV+Tg~Qj^x}zMfj_7A zGY6L9_o7d~@`W4~7Eb857qE%{rfvoaW^V;}FlCNbhyU)YEcHZ#PW|Rv|1l*n>Nd9eA$tp1ux6B zqKK}p-=h9|iusq<|4C8*+1~zAR{lTlqxs9C?iGd`JMiy+(8 zY)W0-Cy={k`Yq~TfA8PZ{-H8NDllcC^Xu^+5%||#xzJ@RE}~y==Z<2i7Bt(K-4*@barpcP z?@jjEAVlR@8gr--L}j^4%JlExKLqYdoEGFAr=xV{4=)lq1|246n-W!GCl;>qqsSz) zbhMazXqf~iKiUg--HCny1mh<*rWoTq`}McoBzR(siC_8j86>yG2;LS&6yWYVD;G;t zGZOPl7xN%w@rXgWa{=EjH{u~ZCcWo}k{DCIvG)+u8_U*G)RSv47`SSz5;yZ*!+!k0 zPe3p;$BtxPd3`B#be;V~0eGI%mais*ozWD&v5YyGiJh&2wMQOOAEK6m)V0l4(tu#U zJStKEp!6_zu2xHjiybvtnv>H~sIIi0ZW?ESpi0C}gLqeT?U89>34e}a!R#C_SYULa z@@M$w{b*VoLAP#ow}~n0829zu=QX9IvDf1dI`wvTnb^vysv^49dy3~rh(8VDKIOTT z$1{+ZEScH`3AQ$g`BebH!ZPj71fQ_s--Y4bPic84dxzdil@Rh351Ox)oBJpXVRrpA z@HcS}m{z2~5GP*iCV80*78s@t*d}nrgcQ4WVYcsWg{VA_=_7-DqG^$0eM%RTliUhc z)WrWLY~|XrGt2XskIq3x(?dt_-r6Fq!G(-qTeXz!eR6a4=E2@`v=N!f6Vju zs&6$;$>9(%l+`zkNUcvZ5Q&W-h7S0Ta}f>S%kC;o)wwSP;7W(sVbN@TQsx3N#4oG> 
zQZ{QS7EI7eunAbX4L^PIs!&7S6F~c}iNKP*B^cviDgrOq0F(c0oErZfUM!l8W;9Zv zB)|#m;k%fbl=pI{-OYkriM>3v;%8P}ofblq#f|`Zyf`8X#ERl0T6if-PTG z$l4bQ*kt*@roJckNJa=6nO%dt6fJi#biQeuaL$i&N)^-~J31Vh95uXF8G1MdmR|M(vpqS_&ZZg`VyJ83wbt=0@%G^mP z#)8USr2`sE;wg3hjK&r;&Y@c8=7hKgY_NiU7-PhcLqROm7F-T!BM7 zq2O>tdXrwnyVt4)?z|kC>7wad@eC2a0 zN!+4TtL~tbEFA8FpRnu`O{bewq_aE|BS`b);N$kNP-yV5hEsifN+*;=6Y2JySO&Iy zM)oR+^xm^cZl}Rb6gI!2?ozS1&xPXnvhHGE$kx76#{EEz0U5NS z)cT+~PSXi+lB~Car?AeRf^Q$=u-O^~z(WC{ndHEchEuxj>@D|8T5E1le zxKSW5j(IRhhF3u14C)mN-XCXhsN|gtz~l3l1E$VB$j7sijhesEM(@e|oUzoVxQ!tq zd*B+-r)={x6b^CaRjZB>L7)=R%2t;(^v$-lH}_}&euG*jf<~x(VY%0jeO9QNet)Lz zcMiL8FoyIwth!Nww0Rdee)c-_92OXrWC{;OIia(L^fGdG#4?gMH|B0IPJ@OFC3#Q9 zFt%qnT{)C|wCR#)U*SLYOiAsHD8a2Ye%flUd+$(>^g;3Qp8gSO5m@CbfFP$T*|Alw zh<87I5HpK$?(e?b4d1V!B0n$PyS;__WE3h;VS&Fu;b=r7kU7w}NOb^le1(h5Bkb{N z-h4r$6Qcb26=suafO_0-`DZIfJ-z>>h{YhJAH^izJ*VqF-Zo*k&#>!|tPGc=ijXPb zRR%NN9%hu`(EUkwzMnojvOzEAjiX_7OuF=pzKv6zxcpO{2oBXox^QjUJ7@LcNiY?K zDrg(Yv+;Pk*f(6`jPih%Q2|FPteNnxI69%CYn$!iwlQ|tbecql-jAPan`W>D$wInu z?Of)GVrj^?g4-;IEOr}$*%GJOIZ+y31bXYpHW_9KJ0l}4OG^}Z)kDcEkV%-r<Fq($|}B=X2sJOxqIT#O&6PbDap7Vrs(^-@=47 z2~EM-|E6ZWKB;>@*@rmRojQZ!rGu95*hBYz-a|sB|6_{o^J@m?Pm!#bM^bJw0wDs0 zlbHb8w}BSp-BGO{H~O%i-2&Y<&UiMz_#Tu3u_qr5lq^+3%kq+VyY7nEBv>t!$Yoxs z^T>&Z#>VNDzbRmKJOmW?cqE*uzuBDtIn~fQtw#}*A(=Bv3 zthwk{`e_6GT5ld=B5kDoRhPh)@ZcW2D?EhAuk4Cxg)&1lFH-BJNUk8d2#iKDr)(LB zMW3ueX&V+_PY}_8XM79*Hmll_CfjkU1(Il#CiE;L71`U8ewOt%h{D)M3F{QNUy0Kr zAYXE6_n({IC)G2iEJ^hG5)n{y7zzV6X%oIYrWOsPoIGJc_dz?)JAsLGP~xW5OG~ZF zun{NqyJXPLQq1_mJa)?g9ldK z35>978lw@F_o90zp`ZCc^PWjy2Mcq31^oO>FMJo~KC~B;)ooLYA4#H2`uxd2=%F_l zPvl2P1j2}E@R|tTiX8eK|1)Mywfasm_Ja$JOI< z;mCR^=e~v>ieam@q^vIj9qeAgaE|_Y_N5D7!2O%ruM^FqyP@nLc&|xeGYjZxEU&Yd zwioLyYi0|@PLYggoanKrA4K~Y&7v4zFx1+bg1dP2aR)TS4;u@p=VjmbfEShKY1r0< znI4RZL_SQ}uJxC=uNM2u#};eW_$eXbS5U&^NK#W(VEOzPUpr2Rt6Q?_s31LMvvK>W0! 
zOJ&4fht())Bx+;`K6c?n(?0J_sgHOJz8axTW3FBMApL$6DazLt(~_BB!hmvPSSQ=eX;9vA!>=`O3Z z5JB>v##g%QGu6KOI!8~>&NyZ{C*EfMNP%k({%%$AEmMP7y4`14v9^Ow*fIZvqBt!j z${AZm=yYmX{1v<~U>$oqjn4-XH;MI%#Q1J0t9BePj2TS+5F&>Yejt1A^J>HU`ifjDF@2MixNO0IO zr4wB@!-slqgJt>s#}Rq^Vx_>HEAZgkJOgB>PtVXM>l4M%^@bGiv6qTcDI=)y?&R#c z5m)taYll3@pB;{-&bIr_J$J^#Z835R8OJFg^;AVfR)tk@Qu9rQW2$&;7-?f(3-+$r z;QYya6o_F&D#b}NU(ECGSL3>|sWc;Kjn^yD5c6cKThxWC#X-B9ea<<&+g3eE<(9bj zu=X7Q#e5}^6kUN2mZ9FAGtoM1PJSK}VDrvGjPfLp=|WF1aEqRwx*;|9folY&8snAr z!}ZmJhOdYZWq`Rc%c*e!nxAAuPvc?z0-!@0i!3G!O&XppY#C`w;V6Hp#&odUaYAFt z+7JSIAZZe~NLm`#-@Fq)>}mu@_H;&EbD8yHpJl;iEDw@Q*^$-*?UDXmEFaXAtt|;D z(SIR{+|Tm0Zrx}CWOZ~Oap^05q>^n%udK|hm0o+n*`DoTH098c2_+MuH(FGM&V{8( zeO^uV^0smT&5cd*?9CTt3n>NTi6=CxRkRQ0uCG5#ui{%AAUJk&K>C(aN6zo6 z$9LAjDvV{~ux{X6LcuTS*5`Hh)D_cz4gxV2x2N;0?tk(|#Z0!I zwbqAq3|e)b*C#VH)3g+E=@@zO5F=LexgC!3$oYmF3+uecL;T71hAf1~W3>{M^ zDHiRWepeJA^cc5{2+^DP)R8tbdtUoF9C=9TWzhb8f8hcVMsP~$Fs?87VALp}4}&}S zGDF-lbB-RV@2)P+VLfKGPICA90;~neDqiT3_(>@($8sd`!d^l)RfO3mFZaA&uDdSE zQeIC*xY0Zv4*no{Hm009S-oMi`oJlD${7Ymsn1MbyWFpMSC-yVqie56mKN;+j4aXh zq5R=g(^2udAW!%i_Mnm3Td4N1ZUI_al?_{q_@yGKpFq;$07zL5YcJ{MSDGDg03Sez zLJHeTnuk#1(+8_Q*JnYej&O9wgmDhPM`KbC=#1^5KzQ8WY!1}R&g>n2SwWm95FyF) z{03J=hWr5?gIB7@&Rd~8M*^I#Z~MWSJ?7&re+l;{8^&@WIv+bVvfc1BNW+C`LF(ouPO1_{7!rr!7LVRqKBK=o{zM4%GR=oDGNaj0fLxy3x)f5@=#daZ zR5SbRDB*NzRYqd;R@#>1=N+^3>q?}n&fcKOO5)ht(&;2758>*i6)b(}*~m~>f5gi) zZ>zhkiDg+9D;rt%=Dp||&-uz$iGj++>zAbY&pknm_4+4$jn?U+Kl+SGO=1yrs5EYq zg#ARmFl-*_z$oBZ8KW8$e!@CPKVO;Kem>|^^Ufb(2pLF2wR7x=M;fbCwh3joTLDFG zmr$YdkafLZI~^i|{zSp&by`Q>j`Ox&4bOGM2_nq<)zK+XsDMVZWrMb-D$?aLs2BUR z*oAtAeSnuGZ(?CYdim=t9o+Y27F@jbx)(c8r%_ewtrt=3&$12wSM?|TN6&YVONw&H z`b_7m;8T|O0ZZaZNmU+QqL}CCcH(2cULWa>T3#pl=;@M34~5(Ur`))?BWJJAzmG45 zUrS2dnd?_?0uDZe7U20j{4Ax|j#sa!v8Fd5y}&421>qZN^6bR2Pwvu$BkqZ&5pd9a zJ^qx3tgH1EVhtn;bLag*b}|E`$>OPmH_&^^#Jwit91;W8?9rgVg_VMUnbl9s%~52h zrlqO0)hV!sj?D6HJEBwFS^r+jI_CZS73?>4;$*2)e$n-Ip&zu6t%{q3U#vB&kZn?B z$uhBlFgv-@*}9$DS>x(ZAh==_wLtWbba_Q-5AJrFem|ywCdENWk>w<{>UA+8=2*EA 
zjx~~R_bB}sPJVSLLSMl=!oK!iUjhEQRQ*`?ta^Zw@{D9q`J>fw2)u#CjqufoxRVJK zGY2sF#zRX%W_1tmMG(JSqSCM=(aFR!6@R24koiag=O*Y2!j3}82ld)IWA6KNYQnI5 z%Dli*WEUYZ);B-cln=kF*`y>y}^Jjxl`vvI6La_qD71#n%Y*# zHT8<4IBn^ijrT25$vG?Qr4R)|pVCZa>vVX(#o8aEC3 zL)H}v*xROAnA01VI4D+Khts^;x)_QwMse@dI&q(q0c857`x|OJ{s^*jK0y4TV(yCP@|oNnNt<~? zq$KNeF_{Up*Zm^Fob9=+BArU}HiSvroz8%}#MljlrfqZFIR1m24 z@|*LRC^7rzP=G>l96@P`Fm^h1=vvcdl8Fs>L;OhluAH8Z=D|?IT<9b}XzJ0#NIRC1 zNX2mlrEf)Um1i*i;HcC+1ysA1-f9`!kUowB5UhuGrqBcc%Iaf4jzAgex7aZZja}c$ z7n3VDgqV?Y6k>gsoyS zQl+EEbQ2L& z*V&J+R#~pqe&Ju=_L6FVB*Gp>qpHwJVx)b)F{CUUZ{abwgRBVF9`6TdisC#8uPyC* z_L|%^vY5vc(6?$f!r;$X2|A#Tz^grnII5b;NEp!3O#!o0ejZ=UJ8u@Ns~1aFpEF^R zWLqP1l#cIrmIGR+Y#QYW%9-KSi7en)VMn|kPl%Upn^&QPVc;yh@!|bNKPjx^Z?DM% zipU&$HxqZ?rbI+l-7{Cw=Y4dwSA2siPeXSkRezu}cO^IciM2jrQE-5Wbrh0Fc}}qj zZK+ZqsyV>Ydbj73AxMf?b``ai|MeLzHsnb z9GH*Sgc3d{kX%QfXb8{gXU9mQVB6>SYNiy3loI^`6`EVObwx;}Ojd(!AvjUS2hDD{ zPyX_J2yW&H-eOr<~ndnBA;&>Sh zeOq8`ZewsjH;SOZIB>Fb96V%N@M`@C70<7lg{3qu3maUBTvW@%KS73I>A;(y1?X}A z~S&~NV&ep~xoqp6DH8BMX^aeHuw(TZPs$71w zQ@g(LUVZ;#99j91y+42<5!?GjBZ-wU?GXVaa&y`Q#r(|ww#S;OKn4r!rHc4+Kc!Q6 zbh1tn6O)?wcg8IEe)ZbP#>T@P7vIEU)+0Ufch7COW7R7y>@b+x%ka9UKjrpSFu8>( zqv5w8J~s6+|IyY@1@=b zo&WTBBRt?oOccYkQ#;0|K1_>x<~dE}W^QeXh(b3nEHyXEG)FeWAr);BC=Aycj-&#D zYGgjjJ77%JHLLORt=Di33fc*vzxFD~049q8KNrX+w-Lw+23g&ViG{A`Wbe!xeANq2 z%YF}fibtS?>y5P!Fx}qFE5MG92EoJAT?Ijk94r?=Z$akX4`J=VTzCj+4?3~F*i?2D z&zWKb%G^Gn)j(b6&PS0; z{XEPM6n(lC$EBK)CFn`SSxIB2UY{55027uuqj7jeo5LM@cjbTHiMNx?)7zyjQNRe7 zT$g>>e;_tklLvrWX zQAicZopM~Uu|*{~=zSfaD#VczT~u+pI+4`mW{HSC)f* z{VR-f;dktN2h!F@LLaxe?@nK8OW02UHXTli`5mlHxtH+KRiv^0IOmdZ-jPU5CB-;ATF|0C)UhI_me*fqBsDH`D733b%tJnl&7DEgGa zJor$wm0KU)h@{_!4*!{N2UX>5lwJ#T{A12Lx}CDG7#M_uo~s{?C?TmOI^g;O?Pz~k zYEOW$k#8@8>19@UJ8G|}*wRRb!kun$i4*p!Xsy}{FT<(wUbd6@TSti0_(hzijF%@X*~j#G&n#Pfqwt){H{syS~=_=o9GKWCa`I1(*y%2xPtL;ujW+oi9g(TP#or>iWR2J9QDW!BUe@Ac=Km0~l@7>M{ zKIeUyHMYur8MKZhJnUL@(orr^Qa4G#wj%$V&a9ToBSRU``NuLO>Fv&uoc7t9OAp>O&5A11og#Z8m literal 0 HcmV?d00001 diff --git 
a/test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin b/test_fixtures/masp_proofs/A08264B610C5903A47D48E90ABA700BB49387329F8FD049D5F66C95B11B55ADE.bin deleted file mode 100644 index 4d88ddcc5d402717893ed197c13f45baa08a53c1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22648 zcmeI4b!;44w(jjVGqV%7nVFfH5;L`#nVDn9Y{$&Z95b_HW@e0?IA(?z?#Z3W)qDTE zbFb!UH0O@DRJE$Qs(Wimt)IU7YOe(j1_p-p$M$Q0|FKoS+fFA&iK~b-$aZXld^u;{ zW2nd{voevaDMz|%-FF8|V0ZgL?j# zWIA8bt|OWL=~-Odmaztl8L#I#my8Gcgy3c>1#_UPC%4Ed0&Qb1Ya}2cWlRvY*OsU8 z8_kS{5oZJK7-W-}|2Lj64CZ-ZuwLRkvAm3H3uL)?qUZP8nRl9TI8XwEuo}_ zZ}GsJ{Rf@oBub0>LkgVuB{l>6(=@gfq&Lsj+2uNeuE;5Sz?n0G+#>D5Dan!0X z|9qxO#PdYxZqjbb6oaTonJ$soNSr9-li~?>S%3Q+h!LR$|>ZLHUX?Um@Vkj=AE=f8efJUtU50 zvI7A|5=O=hz_2G{-ma7D`j20CvItWb>8KMVWBV9;$+@jGnLh?JvI3)T6zJL@=BqVX z#A=nx`wrymTCkv!ww)jnkQ8d?-SL5i5^oeDW|iz&AUhi=ec$Ev`nq}CmZv<4Pke4G z>}df9@+$pAW06Kf5H}6=@hG+6lSNRX3!C4s_{*K>&rVh6nvaJ;L9_z83JyXp1pm%<22Ns0EYoi@M* zl5K|V+%mEB9cQ$*tGkYS`0xNL2eFH=*as$8F%<&fQbhZ#n7za)w%jg9N6W%Ywy4?6 zU6~KJu|mjj&(R1JklqcpJ}taSy!V>O=%e&Nfoc$qodP6$m3&IDiV-K7CNRvI)ST8P zM(y#N_I-cxEeiW)F_*2Xg0~vJVF`O*z9~?;cj@%++-J|Tey4}w2i=(=6}lgV z28v>7wHhhVcG0Xe!*s>R&bReFuEX+*fw(?J(p>vvHW5wEd71BBLHu=+^*Oi zVa2XT-T<#3I~Sjzr@IjeC>85lmM#Y;Z*ioO4CykvDPftSa8R`flZXwat0(gjFBgYC9C?<7&HdnBWZkS7fW+jfwO&Y07dR*T$XHH~Ex%{dM^oVA|PBcY( zFjaqxx`z-!2j8Xahh05LJ~4N+U+aNM2K2YA$?pymSHM=+IhlCNb9zE!5Jawvo5v>D z^xSOaYXek>)|`?P6tg&(5Bl&J%)R(P};SQ;B*q$CJXF5^Z5GZ=u$zV9W z!UItd`E$bjl$e&!t)ga|POHTbsI88v!UrgeqcH_;Gkn4mebk0%OWW442BSAhv}ZAc z0nXm**Zyo4cxIog4YE zz+z!0ybbW>&X~+7LF58fHwW& z5Q6Qjg6tt{ZOH$Vz`u_FLKLPhUYi+IGXS#N9W6e9En9G`cYA#+?_lCG68!040671h z+CNO;L(Al9qQpT;8pTuBWH>#xO94HM63lUb-<}`e&l$Zf+WX`JKCT9u zy(uA;^9xKRt>x_VIE$hHd!_Izg&?WqXS{3S^BF1W2kPsTV}jc-X3TQuaduID8&!85suAEQ1oInJzvEws!t+aq;N_?( zia*fX)6EiQV0Do~S(`cn z!JiYxcg=2x7Wgoo5W4xgM7KgTendoIS^11&BDgXqz`tpugc91a4`Pd)Vc8U z;oAWvtU|Ug!B5O?4Y|~d7L!kHc;Q=`vL>RwgZ+)F-|=6a!ul*XsEe~rzU%9Gs1A<{ 
zMsPK|MjD@dmK4h&!W5~^synS@b$J|O=k@!#f2WjoD9BeCQi#6(LJwMjJzmY`a`NVh zTY){J^0xq%<70nn%WWNf2SI60iW0XV>wF;J5MUq}2r$Ss*PWgwVU znPO2F_dt{x+j(AYs0KA`KFFwyUg1_2sFV~$T5hh(4g+wb(dm8T&P8%zHZ3{Jt2jgP zi~$M#s)b**@T(U7SJlG*1PVtN1%nlBe82$qM5NmA>W=8<9r<9Q@JCIv541czE!+KH zZ{~hs>n}uM5i|1ibu&sLp@~{@if-+yyux%%4#ageL>_`2npEob|8Ep_97B*hNm>6< zm}6*!#ty1Xus}9Ity+Re{t-9L6WENVtMb@hPjBqH`^{QMv)T_ZyY>OtKJBxzUt#E{ zL0f?IO5s-uzf$;rFNKFJdThcrEc=zc_!d0dYhVEpY9{D7b4d(yR?I(Kh2enGSKg)_ z=UQI!{qvsaxAuV&M~&SoBM!Q(#)%_QBSWwVzftu&{)H%fkaRGj@zQ(DNcPgY>I2Qn z*xv-GOySA{w!x70O^9m#PU#=k!hhyStPSFEHc(#h-!&+_8&e2HW4et9U+YUr^D#dnyEk%`ewF&vxDba~)}y z$qseNWYw9%HE0*5oMu@ssk`b`tmQDlspC~efXKZ}U8^;+VdCGLtt`lB$W>R`jy6rQ z@&IMxM?t(RdWgSK^*jEBC|t4obe`AC&ca`@M=A|AFk;r{4_8iRmYZ#+p`JAp5%_zh z+CNO;KYKgZQ-<}`2=k&YBAsurd#V`Obej5VD#AxlJULR1Ge+c93%^qMmBRnd6yD9r zGq95W?m}EMzO!De!cZXr?OuFCC=`iib7oKOs)h8MzQ5yNh{DIM$_ugwVZFAIBoA|Q zXOoS6Wc*}xCffCkIQzPalx1KNeWbtA`iCi8;fIt}RWR43!qptIE01{R>zA_ZQ;hrq zhWk#+i>``PGR2)W{9Av(w>9nN(4q_~<`?nA=G{j! ze3L`U%?-mN`~IEs+Lh$YkAP=?tnNw6|D5*KFMah(|1JGe-aeOlWC4lR&h-62`e!-1paw#)H_cw_xcO`G=fzQ+Hl(X*u1y!v?W=T8F2;1c=-hfrUAx#}smdfl7W8k1( zf5%(J^#bX)$TOZ#4}>XIq;i`kLt2D z0iucbOYK`SE*&{qQl~#I)PHm5==nRH0RJuO{7DhlFM0{5CEpTo?P{{HKk{N3VN_mZ zD%H%W(c(%?z$KUqPNBY%>9x-4-@4A~pFk#DFv90sBDQMp?qN>bIt6qQ8U;AvtJ^k| zXO0tRu4f+LJ+pAVHc%VLYSW=Vl2z;Qqi?@`@Sq~30D9?QhgJm#IN?#8O$EvCsRGNq zRFUP_^+_+$8~UfeiN11&Aq(reJdpid3hUPKB;CVBR&T^EvDf|0>3EWfF6c0kR*6)U zhz__&b8Z(a+-o)~>G9njR{A4zl5{^;P?i1u!Tw-6u5ZXfiOOe+=`e!CqQG)HRV zrOtn*?#AHJZM$IS@{=9x?0-V0KYLvKMW!9uAV`W6_{os3pos3PB$BQ~tb^0{-)6Qt z0X?%P%QgN{+OL}FwF30tw*u6B&uqNBtoRb|f=GjNmtBVKJ4;f(muJaV6?DCF7_G)X zqXP8Fk)%$r`Rx;Px$6s38?i6K=QX;(U_lR8!{Lvgbt3J6L!lpgsH8HO#W%#WN>Tbv z3u(#H-Dx+9McfD5pRx^Yfax%VXjeNJ8k+fY0$;7H$PRbF`8kMU=Rrb2$F}`m20+Dq zsg?0Tj*!7`aMDM?(dq-Ab3#?fvO!-aUC?WZw=ICs8hXavA_MnqC&J0q$)4rM0~d<2 z(^fx6<4BlHt+)^YzbgCxJ^+72c*CYkr6RLcLgtKbO zU$lF|XBp9kVXdEHdKl~KKz#WU0z#-r`A+L?0n~jE;X8MPq=XE2lapiCZDWYxaQ??aYJHa%r|>Q$syc 
z1tJjvlMg@h{D8L5Eyd7X>%eT2&Zqt&B`tf!`&n5BBAX*;>8>H5KI1db=#!MevR>3s1;10%L`dq3 zqJM)$=oc3y^itSq;z7GlXQWQln=jIBXq}ikSLYug_MFSfsZnn^*eJ--1hZ$>;u6uv z2=nS%PoWFpd}@Ox1WTEM(%i-MMs7L>Iz&N57%JvL0ZPz(_(Z3QRU!;1)wGcMdZwuj zkyXau%mi8m_MYyI3`xgE=RPoq>x17$_lYmQU05CUjW~=O@#!VkY{n)DvPKP#Dm4<) zlC@u%j+bK{Z?Qu-dc(=cVvUE%@j|l}!YDy1!WFNU)ChmHu*{`bWT#G~%E+sfX~YLV zaZ;G~6NhWV&neWSwHNiFLCO~2B*_3HzNq|togDHXe@{vShden_QGQk0{W;8>@ftJ& z73Dan9ep2A@gT%{WXW~6vy7cOymX~2ooQtdUjK(_`hXo~Gp)ya?{U{g0hR8LW`s@0+2ZdZhaYN4MD zOD+CcnqRCW;>wzwO-%$yd6~FXOp4cSQwfSQeI(a{cyY|UmrOw}Cdgk#usq<7{D@|> z>_5|4S=5wq8LL*_dFJgf(+vXD)#pb|m|KF1+i6K0dK&4_YfReYY8`IBQXp>*FNAXxS{Qy+#lPVM03U*S>O05rE0>OP5qRlsmQ}0>$9@n^U{#f zaj-5%@gFS-1zeycSK*6jL=f6anhT^zrNTYuOR%`@97*T;>~%<_4q&v?1?F0?j)$^& zYATw9g%6vEnur_t{g4(N>VjGX-c;9H^0ao(u!qT%J9Z2xZpzv~e`U8G){E8~Y^ZoD zs#JXf?43MsM{tXqtc`gl^kEWY#*e5w34({5lnSE zk;>J;1!wE>DR|9atpl%T!GVi>OR9!NuRc9Y5JTmu(myb|;SaRs#if&w74MBq_R(UN z3XCl}PiuV3`)J2JHSGCEcM;4w?+8N`uuoq>Rg40;Ap7P}{q}0(4lyDiLjpGv{H6`2 z9e9ps>6v)$KvV>&!f5M;!&~m2SS%uO`T+O25C|&Da9V5Q%V3;0*K{SfMJgu#^l$+% z*bBWup9ju_>H=hiDEam@4y0%9CZ|)EsyUykgfobdN!zFb3wlVXGY|UTCz{KWtpq6b zGuigP%Yy;O($JAQ@alnFg3GHk>*-&nVr`Cva z`e=N$igYs*ihWOiLVOX1lv!=~sU7HrOA6l2>nJrgm3LBg9Ki9U6mM{xkpGNqnFmHR z+SjOiwmiqxiyB5)Z|nH{*wkcR5#Y?Z5*E0K$YUJ5opaj?(*AEsStlVFM(a|v~s`|23T11p*54f zq7+2ybz<3qC{Jq$3;u0N1~X}iX0AFF)OG&s$c^XeSYqz{oXI_Tk%&zA^{9DhoS|Q_ zp1p-mmvSEthe&u9U4lX3qmw_Mfj59$jatNi4HkJ5Leue5$dLLaT!LU0aCS9EsZ-A1iOA5m7zDyQ`z z$n!lJhi{&?T|yHaj+X5FGji-{=YWy+L@}4H2Vj<9Dxj?q(hu{H@(hqp10fwa>06o} z!flG0G|}0J<7=ANFqN8})2~`cn@SH!0S|*sk4L>j;cD&LI^Uf0-Mi%5Q0#{c`0g*A zFqzP%m|*70X76hhCgmHa4OtEx!n^}`V$7d~x)q`%CT6tq63t&cvp@HVKYO%T`C!)g zOR@R4Mjl`0Rf=}xw|sH-7wT@GZ(k-pE)^9$x0rd+%JqT=jW~J!w{_y*#U|LFwVnF2 z?opq7S?0+i{Au(L!qjfN%f1$@`_O`>a@V%}TsR~LbK(WR>+d**WFA1MFOGV6e%rT!RfA4C-K+*glF!?8H6 zT%gcR*~3r_5uAb3ZU%i^s@K1e{Y{=;&>L|S*BKDeC&@TAG_@SQ2|dGeie3j z1jwnL>$)@7nL9+G5?b5@;b8}Z6R})uR{%~W((*i)xpP2O&y2x!RW%Q2Suk43z3ALUG3jZdNWSX!=+jK0sN?8wub|;9!d4BDp#+ 
zSCdx;M}|b+;g7fLY<_A(CH@KoTsQoz9QaTgtuPQ4w3Gz9b04$U_#-)2JcYon;*4Yh zel?Rj>g$BwkN6=T1g$7?>QpB)h_Tw^dnNJYQaRoE#M>at^ z)eC*awCpt>xk(Yikk6$a4B=6OPEJm8VWBeWOXHiq@LVeHSbzj_J~|w+=EMA6GR}9H zJA{`l{n0@~A!Md7ws`^$xF0{Jg|>HO>{ER*YBVw()i!DZ`#E=(L7P7A6?LvzrH(waI`mLYUSIs9zkAl zUie-EqUJVy+(7prUGSOUw4X@z4#t&F2flECzl`8}c7nQrQJ7k=QFg~WwI`na=h7D= zH3O*+%2qmq-4e6U(8(8+pc@&X{bHk1pRF0m5ShVSO}BX&?@^RsaR!+s`9RAT#d)N6 zHl$pbskO8&lO_JKJ$Ql{X}zT&uUx$8-y>dSE62TXX6 zdLah)zA6z+xr;#AZH<8<7rDKJv8+t81r3V>%Gb32=DNKP7lVrYV56J`dA^#Iw*iFc z4JXvl+NvxqUENW1=Tp|(INMy*5;QdMErqKl2V(Y!vRW*V4>tDOL`n&=Wtt)CBG2~z zE_b4Cg?&DZ%sx(!?|~U6H|mKL(!%E3$n)#oCq!@9BZYV!TAB9kt2mLhH$2a$yv|a| z(07i|j~S^{OlP8zkE&YUaflN8y6+U4dE--etn21hv6=^h?qOsUV}wE5DN>mE~a7}yA&Z+Hdi238bS#*V|j{NIOa?j@j|GGFA8!$0;m|S76yr2qrteuff-5*6+(R|J8^IP_G>`G7*p@(}ZMlfibtdyu0x7_y!&};@ zD?HNSPn^+F$a!1FkV2w^ue8p=Rb}Y)P{NMcEo@DsWj)CC~mYodW zkFzPLit>coV*0U`JJ5HR^N@2PzGFcw`>Du`epxysz4mQMFAKoHK1ER{4m0}B)OhFZ z_+nRd9%O_X4dqYH39}O520N8amW*_{`%n~tE`e%L87z)Ul0(8mowm@+$1&@vyBNqI z#)TkOd-85^5SsTEr$7zqvcRfkD+|$t+Sc9eq#a?-b%>{QoyvVh1A}`Oy*s%|$SSU$ zP-2IvFk@|#PXY8oa$P)3$B#02TtBFyoFj1|wpMz*w@`9!%GN>0#^CZ?A`F^|L%OY= z)6k{n873UQ5SsgbWul>7@ZLQ?c$8rS+-Asp{L*SSyRaDs`P>4txT4H%GFi!KB1z-= zr=51kYFJ0(eZ|Lx9t$lh@x}3y6YvE>h-4G$5?o+6+S_>eOGgh8F}~A zG$x}PYC12j1O{T_;8VsHNudc*kW3-+W*Yw3(s228ZQ?Nk$$q_wMt})bK#fUXXqR1E zD8KxCrFB7`|JSvPoP&xkQrjw>Q43=5TGWt-MW-0cIagY*Ke}%=99Dx&gs0$ta*?qK z!zb)&=*r`0h^|L+c?UJoT{%Q%W9W;U1&pFgZld1Ou0MV@#crctEIjW@OS378ERtBo zR5@9Ewnq-JnU+f{$(ueV^7iY>#&o!|652+D*{ObNhZIRhZlE55g5z*k?&8Lu0HcLX>TN+i?|hTyT;;zoaP z8*rW9@LRvnab(ezx05=bDx64&8}_d+M5bn|0amzw>Bxv$Mry&Pko1i)=t5;YfU9DO z^9oI!Nfa;qf`RF~WQTFx?=rX158f(q2$k3G*r|o?lKg_K*}MLICKM0h!xE2yx-^}c zI*9zBYI2mgJZun@baOz34;d7l^%a_aEv=K~wn%6#6z|1pldzQV*p433 z9U2nvdyf7yR~>9XO55;&v;7+*1c?IGiUy&cnH7!$i^#R-cVAcc7HU&)`~cPMo1Y$^k{WtA4P3LyDN%9zsNOs~(+qGB%`Y$I!h1IFl3CMGr=A!N(GIDl z%k&CjuB5vL>;Wjriy12da^l~9>G?8CfE};`dg7;B?`WmDmz{2fi#Hm+UBCrrVDyua z8f`u~ZSJvj$#Mu-NBQ74H_hqWNp6N`V{EJVP1Z1JMwU>-O-tMrG~RCnO-6C&Ipvq= 
zS*Tqv;4$I!K0PXYKW*4s7~9G*D53LPQwG?i+50$?+0?PfRe`*AAsu!zg1}wK#SVcp zn)34TkMPK!2(oZNDyWZ1Ynj<&?N__s)Nj6N9EJWwLBgF$7EhO8DaMpF9; z(PJ-4P_tx6)~V(}_*J>hppvETh2e>wZV#&5G9`fsQIwg2_~-`9TM O`(GFTIDr5A$$tPvo&`t% diff --git a/test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin b/test_fixtures/masp_proofs/A9D6D90370C747C254D4DD4A2D4B1C762CEA0436B0ECA42C52A830A0FD66BC00.bin deleted file mode 100644 index d67642b43e9810f3f2e1acccdbbee81da239a5ed..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10312 zcmeHNRZv{pw#BV+w?HGoB{ULTg1ZF|?%KG!hTyKj3GQyeNr2#uq;UuYx5f$Z&bjZT z>V3Sb^KtLrJ+o@nSRb?YsyS-ZsI}&v@NjT&M1QQ`0YCDz=+P}?lRyGf2x|mN_zaRw zWQBE~dt_F%31bP0y21aI3C|BiJzz3cHi3Fn;~{G})Kw^w>tG|_?Q0uq3G#_l(f>~T zSAMm>7zTFSR2d!h>m3DLIukOv*hkcxJ@e#xrZuvF+?gI7B78uCGJsOB8MD85GS$`A zb)aRuNZ(cp8?(2J7C)`@4cEcVF$7z#zp5=j&FKzEI&%#Nct6ZQ@p@P_P)q!}pmr@i&>i6ud&Z1wizdC>qbNThm)P9AP#%z7{wy^7wtFyC5NCvP?;6i#N4 z?$4Skm-j5j=5x|(LZ&pizA?T`tcdlKp-cZ2vK@FNWnja7n!@LMlqHdPBPJOt;KTty0s_}0@5Wt^4B$rvpUFZ#jeA)(ZH!<~ zK+abC&sU2yp0R;7M0ah-fl^$T0Ft?Ho@B~l0u8I*mjFOsAcDD!xdjI>`q4tD_vETM z^2b&lW!54abE<68Am_kK0ed~J$dFcUVEnZTTNmPdogPT4QL}1rU%9D+2swSz4KWo< zrE%Vy99SyzLM3KS!x@D1xs@?ct!N<7%jc#l^HF-TzN@sq10E=(@qh=C#{)=Py$|rI zuoV{CO>-^n0NA1;pg3im61@W|qISxSEBFY*uOFHxlNnf~z$TA1S|-cZU43Y=(*tFJ z60%p2v<0dC0U30mu~}Ftm9yoJ*L8W@bBFOE z#NI{fB0A~*m8aBO3gA*q_nefo%qg+bHeXN2!fd{z_3UkBfPjTc)M)?F7&4IE3#~c( zL!0!#6}5S!{78vbI15+>38PjyGvclJYkECkv^$**t7C%pub&2YLuEHOyz6EBPFgDd z+T>>Cyo1G7K>2~CQ`mlAX%T$Dj&Ji;KgR*vnHdv)5Q8p`YDJwk9nfjf`g5)oGyojf zsZQ$h9cn6V!q5i8-8ACXtCl!QHUX|;qsF|dA@)W|tg7Xr*dV}U$wtJ&viy3^as;wC zjt~My^XM5=nWEh_?%sPqHfJLaeS9d5JaKtHOqkgzk|59kJNSJ@+hrZ?1vFBG!wP&y zY7gPByE7=P#i5>rB>@$>Kta1{%Sp)5 zZlez&Juuj=p|~$vT7_8W-N_`C;OPmAY4}Sc(jp$Qwx@RcKu4fTyxz2un3V1Qe0bQe z2!Z9u*b8peH#+#_3%6LUqt5I=C9BDbmQcyl&m3kmP(j3!*oR4*QyNxbuj+-GF?!8zNYhVv!MD2s4IxI`5}yQ; z&H=IKI`EW02byx8EtegExI&j?($eaE`=kiY@SJ771^Fwm?sYy-31j@W>OTQ*gt~f4E}ZOKkoF8JN@HM|DQW;@J|`O;d-(hh9!-$m7}b3 
zef4@^0HCacS#raW?0Kw#wU?@P&!%D#$Jbd4LesOPV)vI?lVMMQ>?gqKc7!Dp@ZpNC zw6nSHQs-0eifq_s#flK~5N@%Fyk`b!nD2Wtao`-KKTmZ@z_%f$PT?5FOH@gy>KXw! zUXhv|+kj7)!x}if$g>ow(~|)Ewr0i3kY%OhrS+21`|FeI{rO}GZ=fwu1lyxsi)PmH zn>54$`YWA8HofjH-6c;6t5tw%6KHM)+>mh8p%D>0bIKiZ7h2gtchNEO&8EZTpODK_ zLY%Ay0}Gi|wG+jZV2lx=mS*A(i%GE`CAW0X&>Z0GfMxG#wd($IC!{|R{_pN@CZKEF z#ksb?)zzZJuENf7HAF@mI-KMMMfZK>mJn01Pz2LL8-3O>xM94%GqKTYlD?pS%a6hSIP|IP5fE$VF0<|p&mJrAbhvsc2txksHU z^G|15)Pj$c-j>}~uM++d^>3>GJMlj$>Mrq0qg-EXinO58jzL>(_I-~FB`Qo?z0ArF z<7>IQ>AxBNw?%zumr+NZ_Jz@+Ai3Q~X?wmw_0%&1-Ixa^YD`d0I1&7hsDD%a--!r+ zl#Rhy#_-AGdH+ua{K<$V5h(YOH62%Z5%26v6h6PQGCoM|x$`WLvIbm;+*z4Bq^%$k5vEj+eeFDC zh`o%Wm$?A^=q5YkFFj^zL^m&vnURUE4Cqy$yHW34ew02LtJ0NUnfM5Knjh}-L*Z0& zI<3dBqbBsCA{2a#m`h!y2HwA+mP9Y)De7T2j)QP6a}BM(1eGt7D4V5ua=}`10W0 zJAUDtrlx0hB#Pria`q1D@Ca$VuJpW!3W&))cu48rYrqZp^62|yv_|qVA8ThY{&u@Q z3G<^GyIoJJ4qD*+raJ~<P#HKHhD3Z zVJ%eeBXk1^#w&{kOZxidTw09zB9C~YT+d_V2_mqdqLhE~e z&z<8Jzk2vkDNMSkV#8gXJi3IgL=Vo%w4E7KvOhXe5Znb{M0&_qACmfWPuW;@PuY!? z_F$xf=j>i=3*9wSkj0H~eq6>Y-Orfi`b5k$?Cg*_wAgD3X*HR0$eDhKx!&m~)65~r zIulQ5aJ`))h}5ic3kFrCN$>jM4=>PEDG$SVoWVywtvkf-26kJauLjD#JZix3qnk=x zyi953hgiS4N(#Eblb)w|RpURgqdV1nuFJD$_gxGf%B<)^Ej;s9ka|zQa8_~8ylX5x z-3~6LAx%_fkWoFQL1OumBswx@aNT_0zkS=8il2y1JD06Qn?u@_^%(rq*$kD&#;LP` zp`nLYwk^>JzZhAaL%lEA);tPs29_OaBru{zaqD{Pd#P{0weL-cQ$9wV*~rV8=n+k3 z>to^$F+p@P>>x%L)a2kG5TSUCjfI_CPDqyd&}h}|W2_vf3dBk`(WtaI!TWs_AiF1C zR{nULK|ti)0SF3AOUT97vUsaMHzlp*oyqM#UfSG2h*5bVfX3#sF%&*z;6y*zO+L~xrRu}LDbb6w6J5VG$N(6m=kep-v&L)tJw zYnQEvA1yho?NI26&ws_@3u>#kB}#9#rdW9sJKhH})!mGCFEXgUkiNgpfhHIVdb{{4 zU^ceUAUK|(xfh9Yh3r%Mb>9|w!+(0fW0NCABI&*~KLni9L1Z~%bQe57?WHU&D3+D+ z7|`x?dQ62evaM599TT?Pe9|gN*Qix^Dk!)|7l3+48Z>7YrhJ!53-@X|t%x1u9%YAJ zW70P2Vq0COJ7yeylAO+^_t39n+TyJcbARfcrb2oJ?eKENVlEWQP^UEh&u!DJ3Jg)&1ekiO+??RO3fPi53XW`0dYa)NCcxK;WbslAU?!6ldB%Sar7fXZNTe$DgF*4|> zOPN8SYp#t=SRaO!LjJ~R?%0CRTy-SeOk=NsZeDpwv#YazNf=e(D<#T~i?<$m`5)2A zjVKRB?uo)1zuV3&&1$ag)T!c*)767JuTgTOuqs)~JYXnoa5W@m&!6gIo0`bxB)<%K 
zP_u{`im(RCd_5;B6<{W%apJ(cMboY?$-5E3tX?P20IL&uqtSzsRb2c0EamgqZtlLf zu*}Gme^23mGts~?FuIz3q!??+{LJpZb~P{5jwPg9nT}4Zr*&`YPWaY%B%LByFl@$C zdTjZ4gS@0GM6qU8DU;if4e=Q7)KHj2xtxE!svQFxfJK3}LRm>|J&|uzd4y^N5pi~C z$hZEWYQ7t6!QPl)zK$q~@j>WUP4@`4Zj>nK;GIr8ReoUsQlsZ%`na$uw*yq=l4<=I zu}pOW&I>lmN+@^20tfG(K<}qp*+$|?n({(N@rx~%KAD@^`dj>zr6Wd> z<9STodyq^>@h(s`;&a1n{fR@{6H{n3RX2*2`K;T=03-DLFUuwXH_N9Pi?d_6IY)Xa zf0nb87fJXHvM)0$mJjODkK)hA%xl-%EQ#mz(2qRgc2XlRFl9mjcdjomRzPWr#0&X#*s=Rz9D<_T(;cdFF!*O2@{i{q=?7j z=$vI%LC>p{6;uVMDI65IS6f|5d{mJ7cy5!7)Nvz2+(`KJN?Vh6S<~6thYl?YYcycE z{a?Asn@#MUazm{p6)YOizz9TN6FK6yAjbh&Yqt71CN~NHbMFnGvW z(w0VX4L;yO*kdnsY4kyz4$b!A%)^AaP4 zMmuOE=+lv>CdGxz9{I~id3ofR04(+Res(q{b%eXf`6<|Z%6%5vWRGo(2j;XuuhX~22WU8fT2R;o3eu;-M_&@H`zXgAh9eX5m*Ma0iOei~ZmJE@|?~p+n zw``9^985wdfz`zy8B?@Deo9UjYdYeCjVj)8pxfQ z6elx%TJn)2dX6011f9FtX?vhE8n9;4ej8$->ko9+;^Do5YaDS#o=G@pjEk%8aj!u% z5(fV)q*VE6Es&?O{cgp9mnd$P$5Gu%qh*}<;>_M~&^e%*$+1o!h>B$5FE?ZY5W5nCwQ2fP?sJ5l5JVBx#PX98ItX$D!y40jrK-q%% zwMiYVqsy)3d2>2tb7(l9(uU!?Gu_)n?D9@P{+gX!-8UR+0SIwQ{BZcv@-cf2ir|p& z9ak7rvwq1QWCRqG^j0Ri#l70sUsG}VWHgzMJ`;zu!f{>)&mVx?ur$SAjhLGxgZv~a zaKzj0utXz~36+;IFB2wb17k9OS~EMH{b?n-=0W~#<6D!{#Sl@-Ntqur|nI+8WxwxxxnMT&wQxUZ={ z#XXH^sHu;1^s<;TMT0L0!efECLij_ZNRwID!!aa+kU~wU)^(z?y1Lh+@skwCcptEB zs{e%el9<(%)k$UAPX)~d3>7n|&Wf~5;+w-)GWn4yhRBIEjlFaU3-)p7+ql||c()2V z5v_wzUb}9%2%-f*NA%sI^WuCWefHW?Q|R_z3HNuGO3~l#1siV#DSZwU3ZxI>FHjqh z{QSmjEpJr^KVtGT3tmS+Y^0;{^A+0L18|zt)5$g(3Sath!}oZMQ40Y)pgU(cK6DbF z6Z-HO1tN*ALM48MYYK_8n5cZ&)+rs{>hy@Y8Hy1pljr*l;?=bvOPrC#Y9r#XyS(?- z*_)p$P*8+_b*GY{e@*D&Gm4&_jFA5C>d$^WutX124ZoKz4Re2H>vNql}9d+mYBH*L-^oX zLIOR9c9q68wtumvQ;)Fl&~ZJbhGHdF)Ian@BXI;~)w_W+CjrK8Hlk#jd4oiihHVp# zoEUfDw6RDSbR!?3k4!i=uVkTlTIoEI>!_+YUcmX<4@A&WoHJAXX=gmdyXqkT+E30q zDs&<1@qBcneJ1gv$cJ*w1}yKvL)nW{xHsu~`1n?V z^>|9&mx?>2yBUL&vf{HiJ%hxmmk3j)t9t6)Zo7kM=s{irfZ2N4UKBo|f#4Cui=lq) z)*l{)Umq8xlqZhP&@QwtzV)Tag=G9Fhr57`12P~A5xn9|c@p|Pz{-71VudxPr1Ib*1Vq%T;fbZu)#u6p{dDO(jaEGtVEn9om(w+{RY8W>&OL*h9O3fb<%F%BV^y=j%nVHKUK 
zrg;Hcy;~P7TVC+4O?Tu?irWt)NJm@HMNTaNF*Yt0aIba}d1y!N1o^y7AXp@M-f*Id z4)*nycdT;b^Ggmw_w6(2kV%X8XgxI+h1YtrS?J4*S%I7>cK3K^hkgO|EgrKcs)OxS z`0+QsZcB(7*J|BR+CXO+LFy08aoZPrZTV$*8((y#hc^$J3n&$?iMgGgwCYhAqIF-) zIJ~C7H8`Wwp(feJN2`Ap)F35ecZ0cp67@(yGi3P=IDbZDQ?X{SJyJ|ZLjSb&1V7;!S6__(%N8hD zS)|#pt8Cqc%U0;@Mijgfb1o)E&*k@dCyKz6L@&V0!Iwu^RXd%OsH1hj^7z%5O1e*P zQ&L)7>)LH&C~faI+|i)I#m@(=QRkxcIo?x3UM!jzDpd;eI|M9|QHm_%p7lZS-WkQJ z!t)h`0~j8=!Fd&1Et=Zdp)g$TERE;H?{)# zz+TlTq@M+$58ABaLGz%jun*{i36CNv5+9*fmCGl`)#5y*N>P$}Iol?^4|G7qR46M^ tG6tALvciP4JR8d_ipCJ}|4!=t-~FF|W`76bUzN`J=hyx&dw{{hI0b9ev% diff --git a/test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin b/test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin new file mode 100644 index 0000000000000000000000000000000000000000..565d189c0ce9f5b3fc605e1d63a57a4b3ff949e5 GIT binary patch literal 19947 zcmeI41#DbRw&%^vOtCv=h}n+aW`-DIh}khSGcz-@9W!Ih%*@Qp%Ltf#>;3GZz;~Bfz7?mH zk%}Chkq}wOqP!vnE?LJz5K+a>LuBBGIR-NN#5-vkYg}a8Ja9c%+HvDTZ3|lmEzZZk zV|&Azy5Tu>N>YrikKQD@0{dcqV0C}n5Jf_|K@x$JvOLtkE$%c`csx-x!*09ibzutm zGo<3>1HGZy)Pyqcad~EOb2wq&=;wN+6R`@rLKim74{K`bm8v;)vv77i*Y_ZR5)G^T zL^C#)l=sn8bQIS^(OO{JFYlHqwRzLal8mcko`}9hw_TR#C_)Yz_0!9vl{yw9XcD>9 zq`*sWdz=WV88M&7esnoW=8wAP74!jG(7{6h0BeGe!!{>spo1K5xZe50j{4ObeMqA- z7pt9@tJ!jINPsfT$9hN)A;uec?CB8)Tq$2* z*-9r!p8r4VuEO^qwtW)?(%SSzAs675gz;AoY&b13gDFcfyyL~3NLIBo5~P0oO&pB2kJ=&4tAalK>}zUVQNxB z>V>;*iF5*`V&bE;U=7bVna9bSTk0$(4~m7gZ>d04(MW+tLh{+$}wt zWO1w)m$Zeo$m_3BHZ_=4O8HM>v;eC}(4h=vSWZw;CFP9x8|VnsDd32R`U98LCro;$ z{+xhd78SJ;ZJ5WZy(jyb%_;w5XJ-Np2Nc$jQ5_QrY_Ru$Mt0}H22pcUpj<4MZ>CFM z`7!8SOCr7@-;x7Hu6vO(fWxHMgkSt%u+lok|-CU9xz*9hC_i7|< z5>Y#o1jYP5JS${$pKNUA zWUtB@gB0LpQkl~cEG&(sq<%I=$Z>u~t?fsqfs@U`TmRZ%>R}F$j!>D9SYNE(b4)%pmYx6$cLE^&b8hW!{bI)6{Yd5ihyL@`F{QySZ>Bm+GfXFX!HMq0KwgOq9N~aibn402YB9 zES{LgctJSic?9l}JxwFgmYYA-!0s^TSME+sQb2c>I%puVZjoWy-CRH)J%laq7l#R} z;&M&p@5woG*>Zrv5kS$Z!}$i>E`BblRPMOdgmeO-%s2w{O40nn>472aPfY!1`yWK% 
zF+Zbm@M_*tn$l|nF+sYo3H@R>DioTxvP4cxbGP+13+cIFKSTSMDcmgp%0FvIz9!6S zN$n?Wj+NS@!WBZYt5$PuR8PdN@;fP@6)#bueOwq;j36yFNOp6xN8$78Yzd1M4 zmBNsY4&@Sjrp`fC;*SN6@dDcr#siy*SKYfmIb)6>5T#V8o>gxi8WU|69eXW6dA;ynEJ~g{<^@p zi2!&nV`=I3Hl{T2^j*kbo8G<6a#s5YrL4gRS9K)fJiq;u?Y?k)Gcd4OxPuxFP<{4H zp}hiDGj2fe@C1Y|?!5>h_@Mu-@BjGNJXAs955i?bWek@QA8zoSxbyn3@6It7XUf&? z<424r$NG2IKh*j|tv}TIuc!szN|9R`S>fcA7j!Kk1nuP*J){B9YqPg388t$O2(SUT ziuMVe-kj3XZZut47G8&kP5c;Tla6XX(mK!c##~hfxR$O#B-)EJ>5zA><22V?WJ6Ab zG4(`V_)&jpM3u0m1qS= zN7+Ssb^sv@MZVDxzyz`N`EEV-<&z+y5j!8?QCUGmluEw@fGOE8;bqK zxNNqu)BF|$FsqchI6+9QT4fLrp6+lm;zWqyK4j5)ieGdk=|-+e2nhT9&CtuNYBFV| z=X-LX|G6d>pQlOnF_lQtin$ROB-zJ*AO-w)#M*}W5BU7;=65e{TF$TkM7Yx?`8kQh z;4X_W;-{IJi}AW{Qf|^}eYF~T>^+Uo6vdeVzt(;KK8_|!=;dyhv2bt(pG4fv&2ikE zaGtR{P0;8YxbiqVCWsaG-~Ij&?hoPq5bnPs+`G^#RFo6xOIVkF=j;XpzGJ8@?7WV9 zVyu}s&)Y^yRY1%We)dBvF-V*Y4>z#si$Lz4pY?qC^*fbMd%iLq)d3LWf`QiI1dO8@=Pp*ku+(X@3oPTm7+HA~@D0Q+DTaYb;y__9TA?;eMv z`?j?@fLLCf;+4?SJ4k7Py~!LR1Cpp$d(t_NU{x4h!DSM@(P@2q9vC4MAA{)Eo6jF8T5%8Zhm%e~i{DWIyzgzGVyZI+zz5SoQlWFXM zY@+*m$T-;95i1X`8V9)r7k@AEum|Q8)`%aI548gS5bIA?&P30=*chpiBLZw&Bq=9SU&)2)GRe%MSoz%-OR^?kb|(7>)8bOxxFa} zg$HdS_usJrJbd4sXVzoRj?CE=fkBN|n*@P@YeZD%i{W0aYF>JZEI}sy#{22V z{VZDn{avMu7SkQd{B*s?>S}gWb5_%^3^;`m33AMwgx#T3t+zI>514kAkt)p)NH6N| z{xtr|4x}9Oa+L+5reqDk&UCv;)b>oOxg1MmdG6t03Vf}6#uyARO&52OKoCOG_ zcG!)07y1|)<^2Sa4Qeu~VYlS%wYz@=_^0{zoPqyNQExH7$BeQwNt&6r`Jlockw#}* zUps3b8_T_U{NjBZ=k;rb|7B4tg6cF=UeKdp=B%8sujD@4%BxX@!h5knd#qS)(Ey(Q zi2B!5|9iIoouUTrBc?xf^uG$*9>4HOS?%8f!`EkPyk@X*iLjSU;ZARcF z&yrS;@-lUFQCYv1#Tg@Mqm(azCmzL2a`-f${t@-Rr?jKSAn(BYg_P3(`Pu1nWs|x?!vgR2h z2-eS8{=Z&v^GDSGo?`wL_0Q^BIEn)DeEYY@gbbvVaIlGu3BO!U{ZsEqghGnyg@1D%3$%%$X}8E;^R z4=~5nOpY30a>j1Q<*67oxmkh_Lfz1`SD9uWLKDD+HX?HE(W8^l63KU07?Ov*YcF3U zT1iL9BT>Wwo6aE&(H48Y*0ezno>^c7%d?#3{Gm1e>pB$5*C^UR%+pC_@h=Sr@$U?1 z5q5Uw@w0T3J3CDm)kCUP$)0Wy@>V2B%e&a)t z@G5v8v_@e<09cYgqa61}4qvQS#)wqZ=@v%Vjq`OlL~cQ4HGynuUK}}DFXu;J4H-7d zY=U3SMR#n07o*O<^0IWhVBxzp{}!lvO-d?aHKzlmJv55kZubE8o=V@ZPHLs!=$XkH 
zkt&Md$1n!O*5RB9S_d*us%Or5!~77cd9$-cWMSK=r|T}KAvu+`7LVTv0kYfJM&|cR zAyrY#MxV#d5y1`Xvz|`_I>&BgqMOR5*Y`Jk%wdrNlCAdm>X-jaSNyJ!A&ZJAUYR8#wz+%95dp5Uz9*y^j0y4pRGU;ya>)3g0*|Sa&yrR9r;% zkbpi@HHkAnrwB`nZTQQo;2jB=IW}!gb3Emuek7slqQPf0H%YE@Ai>|1uMd-Z4wsSH zk9g%E=3r^%CJ**TzhG3rQ9dDshDDQCTGJyoKS_ln)B_pV<2lTN(`A&}mYb+?Uhu(| z2(ZGS+EABg&!&x;TY^tquOgc>Kq~#| zxd`SvsVIA=O9!CJ7G=_R`F+-zxLK5Tgr7(VvWKQuK(B<#9CYn(TgL2jqwSLTlt~Wu zhh~7)n>y2LHvNd5<+!_m?xN(B$vzNtYyzFI6ZyXJBf9Edje<-WoOg(>cYGIvUO0~~8#v3-3^L>bCoMmvZuWWXjb3Ty*9 zGkfU@{3sGz5xurzU5n4%aeP{fruHMQsb>LKcoqJ3Y7cI35iw1yAx+4*mON$E94I4j3-?UUT*^+&5gPs2^h%8bn(Ktdk1V@wA3Qwr9g<2BAdn=jF^B z1O--d7(KV}6E_GO&~9>7)pcTAG*yrnD2~Z9(SY0_(_ZLpPzW5Alypx5*Su1Ri?MZi z_^p9nsPlE?Q=w1y0}n;)9@X>Tg2mrXiKV8`MS!2G-bXSh$JEkv&8``>Rmdu1JiDvv zgFp9kMMgJj~{`y1= zB*Ev$mpPvD@ur&HVya>iYC92gn>Ab%vI97%kU2B`Z)Q1FoPmxSVj!s%; z*6e8J+w50e_R9UR0Dr4B4lt0=Q~RcgZ%MHCP_Qj90LLTmh;D{7^=?+M+D(>JMra-o zfnZGD)E9+1UWL>$D6$$Wr~yOw8Nh*t&m>i@`HMI&@XJ)BW55t^-m*wKZOG{o+TPIzz(QVz!~d z`ex&TQF9C-;FwION8vv2+)m(S+}F5c;M>N;P+JZ&JJk)-fxZjwMsIf4(BwrFCz&#P z+!wg-UVtlj9uNXMWaz&lh`S_>de8HMUR9|y(bJ~-M5d+&6zqfH%>?${yf+pxJxj9B z2S2PFl@3PKO1$vYb&(BPt|n%F z1Q<}-VT$nHxR}Kk;--0pa$Uhw%fHxb2-9}DKo3RKNjg)(ove7(f3t(PibT65 z_EA-qUY%4eMCR%eQ674lmN6}uut$8odR7^nIjx{cBcIPWyI9-$q3qyue`$Rn!a%7L z5RkI|TsoR>T>|=iGUAt=k(iVwMz!A`Q}=)>OUU-h$ZTNZWCH+ilUIHqm_e6juq>^!Z z5PPz)EGasCCt>;V$326@+ft~b#!kP%Qrrkm$z(jei$LYV5{9ug1{XkW$r&1?MZ*DgQ0TJ7VWdh-;a^B#R-gD6-H3gx?a0WZNY z+Vuks2pL>6ePo?{b&S1~i=~;(m%W~Q%J+v;L>psY95(Gx;7>hUC}D%b)Uj{BmzD(~E#7qqfVWBz`o%-Zc_GW57^N_xQn>z5l7zScSR`L4$Mk+r z%$ia|O_KB^8FIdH!$p!yDUw*Pbz$1C#W*lQ;P?C_tOK=tX4R8>l_lsNC|zrIzfk*y z#qkSlYzIzcp;uqF$z2xqHAxzE)G+YsWrGRt15G^fKEIRigcA@LI(>6-HaZ`CBPM!p ztX;VdzsDVzhwJh9ql9cTMyb5Y{DThhC0gn7Vv2tfsSH+QQ^!pW zM=kDH!@MIC_wL+Mt7j$5BknKX3cMzc?MyYwE<14Mdb8&;6tLs*id3g%G7P?(GL5al zPLD6OH*Y4jS2^0`@h$0v&%GP8#5jz7ghL3=R zTNhOs$>kl~S3bNl(F)z-I6DK^B)p+KUxouwtXsb@*e#i2Zl&rPec;0dB|%Uwd5&)h zq65D$Gvqmo{JtGV!2m-Mr$@Mi+5-_?oa@>FdzGFP3hMNHiYdx?45~6BXiYw^ 
zwPmu2@dy3lT-Qeo`IL|F^E&#n!CYDJHZkZqD`1_fc8bx;0~g>pZzKeQ zDA@&Fl(G2mJoJk>a5qf-H(yfdVT!>&z|yUD)3cAca@fFrbi@ij)PYd661w;ZAJ$TE z6pBwEpKFnVJSM!rgc_{>PKmI`P6sq)-5HV+mnVeef(t%oUp8yWb@5dCo~XsRE>w*9 zrHI4;-0fi=f5!4cN|xqJ<0gnf%e_XQv*^ezt^`Y7Ps*sNx_4Uaj7RqiRkju6-fF72 znNEijTdx>diS9%xb6immZk$9@T@~9W&xR zU@*0gJt$AweW~TdoOpG)QI;(>M#W{EVF}9sn>9*fSxupe3b0~Zv`flPcNwRY2KF@PO-FO}T@^0-v-;>#XKk9|H&6SB$>}NEbo;owwDUyh zM*lOj@}o3mLWx$7McL{$8UfqfW3r;;#Bh5|N&b_GMUl6HxdBg2**I#IASIcm6tArj ztCl89Tuh5hu4x?~E;F!eWXAH%fMnxhDmH+zz(-zQR-4AFCqojLR?TZ zZ)>A5s3|DWZSJE@sS4QokY3N_fqHy{wu$YinyDU?GiywXjGu-+TNlasZe17js$2gO+YKC0bLa*#|Abzzum^j_+HFn(^4Yowf%%>YA z1JzOx6kHZiz)H$B7>xYRVL?Y7`IfhH!vgJ1;vqu_Ay^?ooc3y*`>-6{iAkXnLao1A zfdZE!RoSE@P$`nVz20M=$+>CP6<=nG{Qza%1}~c{Czz!xFTJupx7hyHI7=UuHX|JYlX20^S!t4wxq{iSBLOiWzh~yJ05nZ3dk5 zs+foDk*;q%^1+3eDtd6E9mXwQZ)$fC#MfbH&n~HHAB21^QKvUSbzSkFbHe4Z4pj&y zZ;#94zOM_+$-$pS73n(;3^5%CKblM_e$mnbH!naOerxan`Q0fQzdIZwe4K>-KwLFY zQ_6;%D!AvS&9w%5(UY}-Ni$uKogC$LZ!3IPKSeaY#aVxPVxDK=JDutDGBYS^Qs|n~ zeGP{v&mGd$+^InmHXp(fM0^aPt_S{t-Jyp&k{)X7v%_5*EP7N>y%#Uyx(=~i`NLy@ zD!NyMBsjdseo@XbZ%YW-=yh&rm!iQGZYxVF(hf5# z+ZPjVTF;wG2s%dJB-^zW3BdyP4@XRw&Zb^c!Tsdz=akGJau%m?nWCk8a)r$wk}HON zdmE!i`|j92(RaRz2F*cdI+vkB9)z3b*L|z5Lny3MLLgE;!42m%X&Gp$(x#HlCm5!# z+rMjGSmqfxxbzV}LBcflYK2p!=0FYsT*cLp=T&?ySny2wzw&QoAf)&#>wQ zP)U33zs)hVJyk;Q=p)@+8waeR=lwpi)V=2E>#3$ddDQP|ob?2UAI9LUW)8Cw#PuT% ztp%|(4l8uXI`dI_vXTtLTHAY|iv3%qZ$|7&!YXZrx1!1B@gxHap64JmNL1-6rC2BN z=TtB{MU=Lj?0rq;ZFVSp6>giCUaWkv5h8nh;Mh1qDaz-nrgu3cPXl)-kX$KocDfQ~ zpMzn&qQY_42}YxLMAR-ju=5vtMzn=BD{4}so!L#faY@q>1DhAd%I0sj)6UIbo(Max zP@|sFPToF(YzFyv#Ij9?3h;u$ROA`mVe2NLzE{26uGr)d!G=z(v@U3z8Ed|(ige>* zfPtju-Jy{Ch}4ee?W-_2L;gvBm2QJ_M?I?}jSr6knjDixMfbR~=NqjM9_a*Fcq@Kd ztW>XOCnZ3lmg*7O?NJKMv%)D>GQ+gxEGJiPthaEoF`}ftH%j0v@Je6D)I}^cL5i?+ zY_2iRP(c0FE=1q8Gdv($vL!7BD&;=lx&Y@!Pms7=)N#v8DA|0$DAMP!XTu$jI+|=C(lL)P#f- zNcGdM9B`rtK9;J{fa8o`1-1}u`&eLcZF*GqD2K(|Etrfysc>57{aW$IU1pNSgWh=_ 
zZ1@+nV)tg2Za5pIqf!4wcsfq8F;-(4@a1Rp+b^vnmrTKJul!?zsqfF|Rn^#SP;&2pj(Ci)8w{MhZ`(74arR4fJh6r@c z2dt4a9L0H$y%j8H1(T)gw{*}3prQ~2Wx>Ow(43DQlpeE`Jz~aR+WWrLt)^=mEc$=Q z8{`b83XpAm&qs(wXv5P#7+ED~-NzE1m=Fp4F?_+ny2|KNF(K#~oVt|HbrzRBAA#7Y z?F!3Fjjy;muu_JSeHXDx{IW1T&hRRgf*BKSxI<{;3I0ukIeEnOb(<}`OjQn>r#$vJ zIXPM=i-h4t^pIEmuQ(#NV_nbtw2b8- z{%xHp3pbdJ{5`vyYZ zL?DKfktK@tHa}ko6K3z7cyW2zj=|<}trcdu9oDYnAh}jYrRL5_zjJ;@4bU$Ma3=70`hn!e$6PM4(ocU+)xe zO|XVnzn*cMVp2dA2rlB3a>W$kb7Qg(tHS9pYu3(BFPHLXCmb$2x?HrQu&=98 z?gdiAf3`~eI>kDpvVK({HaNASI^-&r;8ThLPpnODMWDdpYgtr!M^}Bu&AX2F?Se>g z2Z0(JQy^N+UCe9g^$vmn5(x4!N8#*gnJA##r8xKPaI{j^VSw4~N~9uPK}Q`38ccA> z*$_e$n8~`=ssqL=wSoiT)@_m8?&!z}_4`+O%4IC}epS_od%5*_ zO0-G*x#J><91#Px5ix# zOla7>dJ2D)72!y{9P|?x5&gN1Nvd25n|$Un%&JfNVhG_8G#a_&b%8i|KxF@?^R6%X zFxG_@heBGs@1+n0m+Ln0Z#u&ilM9y0`X-vn?HD%utPsc5Qs74Po%!{s{oQk2d=-U; zGP(V78E1u=+&=Xef#G_S*KoF-Fp@r@=|@BPh)W+8+0LjQC|c?+7~S2EhT2e(4_*XX zQ#BD`k%gA??K7x=`4unCQ4O9*)Ibh1XX}YF+*Sl~)1g%aj|mw;hHmzC5`5vXzqSX% zuG|1|*BYM-Shq!ze&Fi;Nam?&cuJf`*}ZWVvawJP*9Mb)2KT<_Se~FejGmLgB6oeI z#b9#c^$~fJBZw|Fxn0jRUm^>sYu%+;%N3x(F#EM$DFCFs#ma%o0EHAgl%x%eGqxcf zcsM@5kYb)cSjX2XhF4e5x*@k-g&>;Dg1JK*lKCXU4UZ6Ue!Afjxs>!}>yudYq9V#D ztULB^GHTWB>vsuftfBn$C((OH+2t3Kg_1c2yD#Ag{3Xov zDIZ6+l1pwlwFy?-*YRd|Kp~#XI5ESw7C_2T;Ly}B_tucD_NqL9a3D^8InEksa?54y zQ8vzKJ)=)I2-!&GXSax#Ic09untPN1#;*iTeSyyv{h7kGs!nL=uYT2fxhcl`CKq>3 z{jR{E&nXAwZjaPnZgWx~@oE~68K7@hQ{290*ON`6W& z8LFQ5)I~Nps>)Fsxt4|PNJak$*EF4PWQncxj@)w$Jqp`!KSCC#$Xr+8rdY2xyr2mJ zrZD??XZc{E41i*w*Lhk2GIFH!X!{mFj3r(EMo`W)Qw);luu>0<+uzltKQEoVh=a^w z1Sf12JX^LrPm|vI!gj`B8u0cFoT-NY@gk&$UcBYDbG^*`L7n-#)bZ1D&xU;2aqb!9 zhV`Wq^82z_0?40-qy6)ay1%w1|4g&}Wk0tl|I^`Ve|`Vu`^)EVCy@Pg{>y$n`}Lb& NzCZiK~)dy{2)v!?Iv~r|L{bjoXQNN0??% zC)8Vfc>Y$J&x{-d+Kqsv=!>+iXWZ12965yPK5KsQ!Sn5Ox!ZdvZIec2Ga4kYC^i$2 zk&B`9p4}LlZ=fEqnnS*-T*6Y%f}cwcvMC| z@%BTx&l6L4c#XrgR+HY3({7o!Oi3XPv`UtMh8LgM5}2Vbha(y~DQ#F7t;?RjZijwS z+k~fy>ly|$vBMJ!JK*@+bW_>Ue;PZ$j4kb9m! 
zqo)CaQPv?815&S=46xFM-=Tz_CC70hx7T61=({Un@gP2XW-u%Otcc$XIUnjl4e&qV z2Nn)_m{ou6MID(wS?)Yt&Q*Ov1uC#_>f!t(*uH~tr-t3}72onzEicT20WJXAM9RdJ z8Hji=6=?r31b?+%w zH)6x3thvA>JyU|rydeMzq==QGrc@o-VcY6x{9fjE`FVI=m8CsMj(%z`=xl@n1XS

uE-H%Yk6E~GN6&ybD%-m z&W2AlQSj{_UcHqEP?s&Kgcc%;?pXQ0CE5VLb&s{E?CwBoApS;~L(v=XqC=;m5&mbK z90J15j=E=nOuS#|dj%v#t3&lvShuNFDeE+)y~;U+R1~Y0tibf96TmpRrfaWCsC@0(M@>3Z4E6|8b)c}h;^Z0y4lwN$ zIDi*L6t0h!O4hNp(`oP=WHC7@gVibb%d3`b$?R$8n;L*;EGJH~Ir^=+775xGIEoRu zUCkfoV;|M%^x-zd6N?fEu&&DMh>%pm(R%f5l!X8I8@*95l|EiBr*QpagN>gZpcJb! zt{^O7wKo&|_CADfDKz?wLs?71pm6QE!|T2dP%e%!`vJh8|RlOKIIYI#h? zAm~wEwZ^F1Xbk9SVW|s3mnP7g18dCh@Wt=-V45@5v~595CdrQM76<@;K7nOvPwg6& zku8zZogC818TS z{)mU6g-8K{htbSzH)k6LFiY3-*G7s2WXsgiV$BZKhZlff7O3F$88-&g`>PJ`2xW05)2 z$`{Cp3_;(ax|>%@zUyB-16r3h1`(M!Lb@m0W1r#abSZIrqX~wY>!C7bqITRBF#=8} zuBmwkjN(>)Ut=gDSG_JIw>jzJjs^9+xNHSctwn)~=PXW3NS@}g^Hb){1{Nmp+*csk zKEVsG#)xsXQd?o5F|KJO_$gzkiPl8JaFS!vsXXyU2)=Os2qluk_br4I0CQro)(vzF z8Y0h|;&saoQZmzv^g<7+4BDcj!?1&4DgWjQ%&&+w@sn?VKk&<9ML=)9UkuFO)*#R? z1HAw~y+G%G!J<>ts<3GFw(^#6=CJ&QQ!o#T==gN|qb6sIM9ge7jtd&iZ^He~j(=yz z|9N&ChOYfRRTj0LOl2l94kI%>%{Wx~t+(TfUH4mxN=dh{cp{-pv-g;P`sCf?RVonjH-i2wfc~m}Xl!p3k$GWbT(M?oS$Y4^ z1?v>o@b-YsL1I75IMuK!r-BBwCK^5ZLEC!eS0NkaH={ojb1?xuZipxqtc4XbuV&#Z zFZ--Fu1ZU`Ci@yi@eHCfXut9Uox#3VXcE2aMw@rSD(IDY*rYV`c%nhd|3LJ{ti_x) z?(&}*`+M>K@eBM&IFMZW8CspyM+4?g}TIW|P zms)KJYJEOpDHtq%6r$@bG!V%wzT6eNgXtSVOl^LN;p=2a-H@DgCnk963`+wW>7k&& zrr*q**a~BYl95{W<2yKLKV^fJ!{dP>bvFt9xnD%NNr zueBhAORzHh61da=^8h7*?!sp`w8Vq!>(kN$=60jwdZhZN+e;Ah(A=kGW>3=CLicE+ zi*<<2hO^7SM{#;r`-~iF)?AZ#pY!V$Nfvf`V);JaZrb~&Bb=F1S=;^n`z3>XN@r`9 zY^0^Re!+KgpXhs>KI-;{aNx4PC1iiEvCW+ts|9jFjTnc4f>Q8^op?;nH834+hswz1 zE~^0Z+GjlUirI&9KDBM3YLdQ#$tTQw<|IWEp$5(M=vca~k{*!E6|PnnUy&Snx#_#C z?W!V7DS)67;fG&YDxz#ZT-O%0b>mDoe3l=f5TJtVtCu3Olce#MDU|;RE4pB&d{>ku z=Kl1)x)u9iD1i+th**C+oOBC@T#uj&6JhukWFuIOVOF8|p6|#mu_=x1MmXnK9*&n( zYgHEIth@*Y^JoI)IW@%+MxG5DXTLWt_d3IH)8sQ`K+n+ska30H&JWR-|ew^s?amyBAdk-=KB^Urd>otL_9vhy(PCC%9 zL5?{aBP`%4*I8JSFLCdu(P-Uzk6I$Hr;b<>Khn$1K>#M|OQ%-A$6@*!X9b-w`V|$M zj}edW^`@hB)`W51m()jh6%5)D>&$WaljLw7gr4J#Es8JKR3>bC`MlcuuNc6HncFo9 z`Sql9&MrI&7IEFw57wfZT>;bm7h 
z@iHgh$mhnQ@Rc;eMP2fIgyB5&MB6le`2J+>*n-_9mUgURJ>y*Z-Kd)2D@t=2k%g)kk43xuw-gs3%q%4Z;L8Oi!bh0;MUqHh$FlTN8K6_?cO_*u?LgcNG-s6;0Pxs82_9U>QFP+sjk z_%_Oo%<2wMGl$jd)?xAeXoa<2Aw}(d>4tAcgL~0Wzrffw= z+lI*oO!qD+O}M|J0hhL1hw%2WI{**k(XGF1@`=KiX(v{aGzHQCuJ0`!!O+#GW;LWlDrfoIQC zD+$E3qloOKEnlR`q$54%NwIsZAIj$VZna9K_hPm%2IW|BjD&M~=_s2;L=KpVn@Jk^ z@5_q!|F_TCpOMM0`t-kQw+C*d^IS`P6MXeJgX-9*jUM=VxSftPQ zz?0wdYMQH7dP<>I#q`*`eu~5a=joQWmm2LG2D~LnBuYZniKN zENX*WIzMP1_j@!qdsHlyi6J5jwf_5V-Lq;;mw4Ps1!HmF63aP6Ygm$2^;X2Y-Wuh_ zN8+eTge+PE*RPpVt=XmEG|b zj2BSu+PMddPEtzpB&8%xtb<(YjBqK0Q(n$~oQe=R^fgEbqR7A>^Nc@8{37nPjT}(& z<>11W+jsojX_a^edmKlxPd`?QSAk2g!0Q8Kp}75Oj$3Jp=N=9$Q3S8yUhpuUrwN+K zBiY@by&DlW1s7JmUzf z+M%@Max*kS;}2WDPtNmU_jMg3iB}A1(qeyMCo@6KAIb82Cny0`Ln9Po;V|0l8K>_` zoAFrg*X57$$?@|^j2E=>M!b0}VnP$TID^Lu0p(GDl~9tb?og>ahx^OJ{>G)0qbr({ zV3iEat7VK44eNuI*i6>VBy5_?`=`tY^jYv@-WEaphl50hFDXcng45?^-lj<%+ z$58D>JW`U?8B6dZ0xo?57*#RKzPZk=nNX|n%=Xg||M&dQZ^S;GXT2><*fScaMubf% zUO&VU4kt@o;JF@kC?2*)wJtG{Hlop?F%a2sKbmM$=7GB1wJ2hKcGgNf#^VD?Y%d_| z0A}Gu7DO1)om46-r4Qm^8?CaCh6txZ!WgsN%(RFG=@&ykb#MGd1DXb4oy$j)vR;IYMf zP3C(#14}3XS!OZMs#GA)CQ(k~lo8ixYw3febn_JF`W!o07$U3+fgAf}FzT-A-t8~i zizaLAEhIaicR%eIN+S@o!L934BDbL+P9iM8S2Dg+3L;AE0K7rMcuhNs22u1}Bwo&t zuKP&#SS!96Uj9e;5Fbh~v4vrX64YZS&c+4wMUvwK{v7J2*Xpr=6wRCLY6vq&<1%^c zr&a;1J&ey`ZHWQ%Gx9a1od(+1C-vcaWV`Vy!Z8@|S0P;2=l*VVQeJ|jK(({AFD9vhrsvRjah~`zP&w zQL6S;E03n`$=%9P663`@q=Yu4_pYH(LqyY~fWlzCbY~4sWK#BJ-Np58QAa~X*?K+) z->L;MQX`KeN2yFw$W(t$r#0=@4pV_;pN{-z5sUM!%rW;#@gXlN^m`1IV$nqn=&GCM kPo0YHs>hBTsZBut`gf%tC4nFEL-%w2r{dq;!~d=PC&p#%2LJ#7 literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin b/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin new file mode 100644 index 0000000000000000000000000000000000000000..fee4361a2b54e7e9b59cdc715893a58d59f9ada9 GIT binary patch literal 7448 zcmeHMbySpFyB~%S1d#@%q`Sm}fFLE^$j~9Bbax31B`Do6l+@5ehte^CbO}QvLntN8 
zPy$!aJ@@$`RK=OmY4zCGSj`Gy2tLeCz6rp`#` zy3P$=S(8ahyY+Lg7Huw?__N!X#2AFNb_^eeJIsA={&)P0i^^RkFBlaZqBQwjoCSW} z={T4+qxsJMq`Ep@<)-D)y+5dcpKpq@KD@Q?Gi%)nm| z?XM2+|5&udeydB`5Ly|=9v6Dz{oGzsA50SUux}6h&8`RTj_?otBig^ERR10HUl8rD z{#X8E(U!lwm0ejd)vhhv6tS*Ka2ohBZ7!gg@ESm+q2R||$@GtC|NZ{{mfl|w?XMOz z|FLLW8>jI7nm*Bha8fS!y|2*+q^%Fk{R#>4XC66*j-K29Big^ERR0w;@Vj@Dt(djQ z<9hqY#ouQMzgoQhtW^Ki8scZ%U;UK-jQgu2>Sx^Vabr32e|$<@JpBoNFQ}&~p6$b= z_r6dFC*^$AT#tQ)&m7x90O zpWL0fweR}XQ$Ozlure9^;e#eti7t)Mq$Di6sTj+FMaMG4szo=J4z>-?=HB90Ucd{b zvdh1pQx?&4V!o0=sg7`sc=Wn`LrdbdekAdz*xZ;4tsFvdN82ceu&Zz->rKm)-aL=A z0b2k^l3yKh1tz03XS7baFFvV9YX6GU`TVhj_dL1>(dKh?&q_GMyUF7`G@x73n~y#n z?p;W~hA(WxZf}x)sKVkt$gQ^Oa15k=R66`_TYe?t2rs?Y%7fCv`u-y&D_8#wLutg_ z!o_2!-?qN;+%aq1W(A2hg$Bn~M9FX|_?4}BMY~mg#UhuB(!4Xga`~|iEjXgHX{Ll? ze?P+$&-ott5!)5)i3OT@rE4tXmWd1VuX?}qb<&$R(T#%D<*OLOX0nX*oN`-5hT0DEY-=p$*uJB%rv;Tq!nX? zX~xV|N3$hkM*-KoLbl5U`3u36UFzzFgWkFm&p-173)EbOJHiOHOXBqPdr)j}9WduS z(L`)dKBbhQFK!T(k6T1lK*;pa&OL!4F|+0m>IR99yP>@Rf)K;FHRJ=k(+nU>4AznC zL(NsmkW4SrUIF-s&gFKrN4x0yy7TNaltWuIF|H-Q?`d`(Lg12)09a5BU%b@HGLX3f ze)$U7)6sRpS;A#vY8lS1OcABLeN7w_kEWsN%YU8z8{9+FWco;wl1%)r{&^!PKd8Lt zDR#HLmVxW*)6^8&Qjphp?p}DPSbgij*BE1lWw=W}?Qyg6C3OmnXqO5J)!BrcfISSZ zALi8ct0z9)r!YE%A9V?~;!Rs$)LG8$@c<~e0L$t6Q5zXzy?+XT6j8A=h;Jm;N2 zz@8!x4zL6mk<`DpyJUi|`e=KPqu%uf0{kFcgXILBPDFHgD6^z&oRe+%a+mZ%2{qc zkY2$PXcmx3vRB#x3avZ@gR&1^r?sCJQWa8ytSQ!o<@B{Prduh&+EEH8SR?m zW%JVga_?Q)NqKnO4vcZ)bGG_c_z3@o-aCOv?VC=Wj@_(dx(s zT;-gVR`%yo}T>117Fwqt)PtSOiOOYUc zI3p-~7NhhCnrU0RqsyVESao`BN~i*#`a zNvpK@<`)Sd+MfJo{u-UBS$}mXS%}BhWzT{uTsF54hvK+QL$5y~u8?R}ORyu2 z9z!@Ud1iUa_DuP?CTkVQfh*n^N?*S3-`L&H`n>os@U%E|zHEds^c926v4M)-JiWJ% zxQq&|hvH;K{mzh2o2Vz=1WiWAeY%JArV=l`JU{3=5}4W~WYS~e04?VF^Al>Zo#JvF zLEjpkDo&XL@OgO0o*<;!<8gu1$*>A}ORQ0?fWzRecONbomLRbVL(A-G?3SfmM%+FK zTa8Vbn5icbvbB{S9=cPHb*D=Ble>4((~IIRo}lB^Hu5LM8SZbE%=Y?Y9V$VJ1U=Hj92C-gTDJ^^KE$L^3E$R1lL zpVWXDeVsPa?v&Xy=k12jRUE5?ITTmk#ln-hrb}vNgrtzQwrobXoLC|77G<{Y#4ASE zt*z&4G9*RlBU8`+37dQ(TYk3Fo*WBaQkn)?p-rbWZqtE}`2GP^TQ)@{Nh 
zT6GG>KR@kPs^Mxco}Lfsq3%N+GcL#J$mW|}zOskr01#1E zAlq|w0Y>F<7Lmrije$PXrl^@lyRC^MwwcR^aw6Ke7wJmFbCJ5A>}GEvF~idJnoSVy zZaBea%3%9mVUxn$mfg*zBm>|`Y|s3d*A^_mm*b4ptsWI*yF#|7G<9NKs+A!vO)1&Y zs$4m{D6v=bh%<%BYAALno5)c9NS*(E!P9d=5r&(sjxqL;km*yCY*L=eEO*+-VTQ01>t!-q90*?c=8@kw|lh$~0lyHqm~->bHSb)d-7}K`|a2o8HiA zf)M*BF+GlS`r)fWN;eg8C!oub#8o-U#U1&giVE-1GVWkQF*McT=gSB>ia=LFU5X zF$;}JIX0*dYuYc1VSaqWQ7EH4%`UQ;8CGcykjC@g4beW(RaJlQ3D22RhmK{nk=fTZ zl{CeY&Y8!YGDswIhn}ZnlP^~ob}2y=lq{KZ;>4Vree6}@Ah1>w`59X1|LMl59aZSq zQ>F6o8f;Hv=Wih!SeImsX_}zpq_O3zK^cr4Hoae(HCS37|Xx1 zV(zi&)^AoLjsc7s<-W1q^|BXAZglA9F{-M3Sd-2`cCHCalQfqfSd~3^B=1MJ3z0_y zFytPMZc&g)l7x}F zJ2uNzF-Cnat*G*&3?>SjU0;gPO-Im!4&>92)toP&Y&H3=$n?GP{m{*hjfw5R$P(S~ zEM27ECLARq^&5+|lPXXq;gOv~5Ig|%Py$IGkr6bkls)1MW*Mh*l@LNekhMdhEdt6@0 zR=xa={OqqsKG74BjFpQrys(C;$V&vk`zV~avy!+|FLdH=d(CL(a|=o6sadQk)oqo^ z7^IC$+UzY_5|DL^Q{xJ|F(hcpY?3S8+S1QGg`irxL@>sngKzeutnsqdir4cNNxZ@p zh_V^GWzhpwDT=pd`FL0_=)%nh2)6=}^S`Obfo8zBHRhQwnQ}fcUstw{uxf(>B z$iwK%A74G8$tv){4_T1H`148B|J`b+{m6;FFu`aDswq|y<7Tb4*u!t)tT^}*Q;@fY zGb$dCD>`b#RgG@m>=Ksuu=Y>mSHe7J<+ebn+azvc>>9^bUXgbLTF?JHT=0>M^GH4`+I;m9TRRx~meoX-( zJ`2s&v#%I$37|L5U~dB@o-VZnZE1ZT5Lc?e3~6}&(rcjWU|lsC@&m& z_RXw}-FNA7npG$_;tPV-@SApMPFaO`3uP@<-HqJ^o!DU0dL`p^TF`vtMO!6S*JJIhV-ftRf>$|`w8!%+S5ba` z(rM@tEk&T$x8j}O>~Nf;Qa%KmA#sSztt4(fG(Ucy8^{Ey!4bcNWQ%yJqJNXT^SI?3sa$SDtsQ>e)(% z5sYKaYi>llDG$CfMtXi$sq&lf^GWvqv3T)2e(Z7n*y{V=!2ILn$7;t9{CMUE{tF8s BMC<@eTMqcIGl7Xsl$EC2`5tO=4rN&_Ae{I~{E{=Vdd)9qE@VJ{UGh(@^yG&@ zh?*)PCSX61LL%13mWX_Vd^j|Uuc6q)_R5^;P)A3Bjgf`Lt zj%j4kSeFHrY2?F;uH)b%FbzbX1e$o}pRr1?D06BXzFDm7zICT^f^LMA6cF5Zx@FBc z+K$7-Im&QLQkkd?dVR?1xypE#k(zNsKm?QR8?JA?;{|mj$bB|XWZ6FpX@x^U;6xD-!+t`F3%v(fA!`6-sK(*PA z@N8MG6(GnSJDVz5_`*c}4g|;-xdW(fpeXV|uk1qxA0s%E3}bM51n%|euyT9ery|z4*fE55@BxYnx2MBvJ=I%Pbs}J7a&BjYxqM=L_ zkL#oFCFQizWDNFeWCldv%h9xfE>vqWiPR}q^c~55Z$Sr7-f;p=gqN#Z_=*E462q5^ zm{YQ60_$uf_g2X7^>%f8s7QSiovdjq>S+N6a4WqaGf5)DikgOcxs_S)aQ{ehENX$Z zfCPcCO}Rk-43LIwl^Bz9;{o0}F^b35Hcq&iJX30zEZK5&BSJ{_76%B5-_0j2iX8Qd 
zL36Ja;Uvz>?q$@o-Uz}fB`4XpcG>_Mh<509^2$XrcAb&iZXY|IV8Z>Z97L|e;+`2? zL{xAA%MtB!BKBez7&3co9W9HqIl^YMkL6yR#&RJeJ*T7K09se5`t8A)0Esy7@(dYhS&LJZ z6tFLITVj@}a!22mAXDHh+LK~z4FK6^q5Ds5X7j2~-#QvAAbW2LaJ{z`VlRu3$!lYkRc8yRK*UkS1EySbcYbi;fq1T$e& zUh?So{?$)3p!62Mi6V_rVcutREKjVJda1+*Pm`1nKDG*2Af zQ+$XD4H3XO6@{8MncG%J!}#%OM*0WUnEg*Z)N)Am-Yl0E;UYeOnd8T7ZBY5(4171K z;>u>WePV-3bT(=!^J-MTYaR#yFxj|EblHvSZ@5q-)hFxd{{^8KvJpLmX997E`t z7{Crz2Cy=E4Nt$b5z~^D@}$d}#sjIpC~Jb8PQX0$r}{;z|JMJHD2y;nY-_zKuco5N z5+0p?@ro#gch@s`5P|DPvR{1D`~@hH)%DMD{o54wv7?!&B)kazCWnIcZo>y?m0fZqW83D@00NhUh_A@FC~`o@lPYJWAAgjj3NX2P2t}Z{!QV3 zFomUdZicTQ32<+tp}_-2T|RjbU8VY_`v`>Z)8c|8=ZO6(nE$MYLH`bG`fl$Z|1)?A z061@xnb}_)t*Bw?dl7Y7@jn*0zx#`%Z^DMvb*Jn?{WskIb+B~ShB3sXA+1HPMH=eT z5=w0N2^+ZCcgRUbtgNI@7&kZg*U$N{c2bpR#F>-j|<)?I95c zFS{})K>%|9b>45v{ifV+%KdMYYc*ANrPUH$7iT3a3VbJ~fcN8WI{n(aM&|W|Q%jbu z3K%px;ySes<#dQ9RRQK$z={00I&Xv|vw5*RySp{Q0S9!Y+MCBRfMU>S;$hXcfiK)S zrjNV%_H=m4ionh;crXIc=jn@@5%U37p7+Glv-ZB?YwFj=@U>~fHyNM(lF;+>YWp#TI@oAiJpn1{E_RA z9&wWK4v;n+6acxAv(1N~BTjwTr1Wn67K|Wk&DB&mMWaTGJ)IO9B?h24u36(t7r8P_ zL%dGz*GLRU1x>!Mk0wY_+)w?q!J~xGe{l%p?}&x<`ak*h@v#p5W^-u&aMv75OtMWJ zjg`JOIkV;oQJo>?x@s%V@r$LMnp`!BGO5-9&b3Lg}X^W*CwQxQ4h( zpZZKRUt65Rw5$vnSfC{U#0WC4D z6M-M;W33cMs`|6cvksLBPrMMt%V%)m%w7R?EP#0bg>f>(sW|3vtd1bJ;3F+F`3z8W z_FNPA)83RcZV{5dfCBM{0{`9fLyID5GMUwOZpC3vwzpwdoq-a)QcGiPIA{X^N%39A zy{?Z`=S&Bz2A{B6Den%~hNMqX^1|kIZ7;(e7R-D~ zFYxdB0ZC`tDK?5c{6bN1$-v{DdD<$s@P4UE!1adr)ui2&A?E(C0sfYMW(NL~rlu@e zpB+*&0I=GfEt$d;F~EVBj#~e{nDXx%!pif7{e2mMPT)Nkinux$(_zGJ6ZP z@)s^C(0VL4e0r#*XcNA_P5mp?|4jW)ni|HLVDg7gbVM47IEMOs!Frn7LIo&q36lL1 z!skEM7G!>9_}?~ll$`F)bcArhGMIW7^mdXveLS_s&t$P}DpJZZ&$PYG-=_YR>VKyG zCr!Pud;(m7lq!FW(wALwc{(6uj4v1VsUCyb+{BflAq)BoF=+Qw7%&n%a(9U6}=WF%MlO(`==+Q5hg#Jx7D zpGs6am%6i~(`^1-KOm0durlYV8^=YthDOM&I#r1l2nI;smO)dF<7c@1n~{H;`k!g$ zU!(p{n)>gaeED8lf&WKLz4wN{Mq$98U6pF_J^q;>E3WgZ!cYx-#C(We8Ku&-{I{up z{k{K8{c~jshP;HV_{+!9pBebC_rXAD){x!evgVwyo9022ld(c9p;VEIZ``O*d2stC zMRV70Fh!Io(6OV7pY&E9Ng1(TDHS0K6LK*Q0jc#`Js}@QTCX`@v{> 
zzyc&9$JCTd0WBiSSzc*SrR_g!4=nU;Mz7<(EE0UHj{oKLCJlfPQS*XLf|yr?7`de2{l{A)Ahvvdx`q?oF`H)#Z}-L!TgJrNEsm zUS1@|m>&ragyNKd1o4%#Py@kiW0ahtUNeSqoFzzy_6H@~J`QOl=nB4wU63~Yk`UaT?82O3Ywb&LyrCX)-6jF@#eG&6GHXbr znRW%q5ZQ3UT2Z{nkVs^U%n9V6l*f)kwFbjHza52^3wY15^;Z7w3~%c=3>X;B7DHD? zm;-e>Xu?WRo^z(IO<-CMGG;Xp{3%W?xNjLE>-cHAR;DM!w~C$W(^8-pJyW+)T*5y$|aZ)yHO77%)0U*QZT8-A%c*k_+b`b@;hDoZ0w%XlRU?Wut zy|}rX!<7|o?MA~#$2MHC2_xAu_C{lKA`msaxOwpG2}3bDZM3C%@4F<*BC?P;mjsVV zq5^PubSsn@_)pT~Tlef&gh=}NrDb{RvJGD6wKU-s-&u_UY{&~&)RA`eHk9&8mk(-G z2|i(=+Odhvd2lR)kSS@ku-uJ4NkIe_j|_4XF?vVE^yvFg534U4@&|P zkf-Wcb|0uNH8)%ugXl>U8#3xIxMrTds3|C9ktAb;@ecP)L+@8Sz!0bx(*$uY9Zjv8TLMt`hMf_wmo)MDu+}9R<{s!dl_aMv?ns4tsEis}W<( zR;~c!nN+bc`=EKjU2n|3b;0_i>7JsXIeusoxZYI~=oLIXp}2&r7iU6EqO4<IVR)ZfROmeD*OSfYmL>8Jmu!63SUH06&XXZ()^mJjM8NFm4PPBlEoXP<2g&Piql-IzWmcdrtF;1g#2 z=b^X*J}as)Qiq^?5kK{!CpU|Tqd4um22Eip*Vi9GVn+DEwwC+2>&c*%#kfamP>dJp z@D$>6Plm`H+T%ETYu4#nS07beJ$zxFOg)gtp~ zo_@Z_P``e2$CEt3jQ<)&0Bx4*8iffSJ*KD8XN3C79nh`e7xCnwG?*g-Q%Q>r`6YFc zXG)uTz0BN8_Q~2MCt)&lyQ$CCBIcc;(gb>rX2f}|E!Suf@_QuvaT{rvNLDkJ6;`!& zK`-;nadDgXkGbt8gE>X z#uD=FwSuDVA(zw)N}vbrCBqGvMaY(hZSoaM!GW%^r#+HtEbP61vE)ak7d~@fwdN=a z0ftKwBnp-VJ5}dFtyJQ7g`*WO&yBh=g&gG?C>i#L@kbjYPY+`OJ^02+@+s$Yr+s36 zZ_Qah`$(%9;k!)VeAcBkZA5m7MH4@C+oKWk*61!eYvo&J{UGZ*w?PjvkFk??U6zCs zNzBdBUE1I%ASG@Mf-UPH@3;Mo(6dk$S-eciO0Qbl6VkL%~X%w1B&t zcL??=0It4Pzt2cwP}-h%ka4+$hD^n~0q5*2*GS9G_kvf58Iu2eXrwiE$d>YSxTdf^ z-W4iTT#eed6HFccjCsj)hp062%(10^>)a58@RGrCq|FkO;RaVWbEj;=mlb6pioG_I zLQVMKvy_y?4!?k)$p&oq1j78|fform%^T{#o6uv|N(k@Vjor)fDMQ%KCvn$z3e}cE zu{NzdD6if~AS1FXst*0=EFTDRJBl(_G2r%7KG9YWo{XA~Dp;N0#JPlbD2JV;)Ix|)wTA~s%>FC6n38INxEIGBQ)@Eul~5|7 z0DDt3(7Mk2bU|s(FFkvs)zH|W!UuULI~bA9$_XQ(M)J=>&s0Up4L z@vf&qTt%o2Bk7WQXI@0FHANp>(7=#>8O}Fi|0tj`Cq|XFzuE`Rx-HG+E@)!*4!9%Z zU)4D%#RmFR!p8d(^8wjM<0k2uJOD6k2R%$nNYGgx}5z0 zz9!6%D~?D75$B8#Wle9RZ3SC6N@{4^1sR zlsLpqv>_KBIO~Tve?NnOt}w(K$yQQLp*=0`%93=#7N{ZAm(%yq)pEfRd7#xSQz%N_ z&KJ76gQh7#ninyQ2c;9BTd&il0=h4xXfnvs%j`g_v~%$UG$qdSjac7boB5ZiL5?K5 
zsrkwiJ@KAAM`m(2GKqrHGZw@Tj+&P)8QcuQ=883$ee+6mQdMYoum)($MtDL=!ak#R zwHQ}BgYur7)kf0VP~Bx)eVec1iKvesVXq?hv!d39{U&`DIOlRJ#h=|{bSjIH^{23@|<88Q(3OL!9bG?bQln80-go{VKDdESqw zvs0HM-edUvpJ>&zyw9?^$tTdIo;Q!EPgrWdOX_jj!sycmIyYkgA$#}g zY)If%e0KK5p6_63An<)iIvJZKets1oQ=N%&*uT8SM}6wx&zuyzlvX3h(DiA4ScESzd#1lAaoE^cCH zipZ{tZk#1rhmqf~%=XRGbR+z1v~WaE1&8X@JorR>tAs(^1zb4Bm=VVlD3atJ0$DsJ zC41dzgt?d*3q%d9t*Z6Z)$zq(E(+tI>qwm;^M|rryBT{}d|p=cqvy#C0`Xk9dFv7~ z3~hV+u9M%0fw|3n$F?yuVEv~gJ_D(YN2Y9X=fH4AuSLC+xA{THQ&?2^q%u=bKK?XI zEaNwIoyGB^A?2{~&{YofDwx{z43^-5g*pR~oKN{X9?QOKk@9=3X{;_nQ%A+ZvDt&{ zZX+&O2x!!4`b{v=Y2lif4M&po-wpZ}kxd=iysrGRUAsv+QG|1-v^o-d+J9(&ncnW= z$N|Dh<2Yq7VE24!u?LHGaSHT=J--BN4Hal0t|Wg~%fw=3uP?J{#pMXPlrCh_G}HDC zfW~3snu~quyWaKA!b~n|fD9Q6It<(=-fWl3Jw2g3!bS1IkmD}q!BekS%igQMM z(o5Zmkaa%l+n@`toL0|CPH)-JPxY~E>r!5M6HSb@;okG9w~nXp%Th~SVB1H=g!*uk zh5^4$$alfI5U}<4IKfM~ku%}E5b;tgP`grdDAls{w2{+IF@1QuXKT%+i#y04!gxqz zc&Su%I#0eTt-XD4MyAM7;lG+Y;Rv1PdGRa*X=P!gO?Sk9)6YshX4}~~pAv-3vW2c^m?Q6XnT(hB<7jc~ z8Z~V&o~m_pDCRWKz{!a__tU4o{;n7m>PJQGKYaLTUz=N7XI(nI2@Cd!KF!zCC+ZU3 zD<32Esw7E=iZMqN#K6a_!s{c`UAbm}oTtZT8QD?EX(p&9O%fR9^7ic_`8H=z% zA-!i^w(pw{X}<8tZlFo8rZ^lYBAKgufiA}=J@PS~l$Q-#83pQeNHRN&Vr@98E6o3y zMlYVc=ZF$PH|9!Ct06!!^TsyFr50BaE@HRRI&;`1cG_lHwdGy>#UYhuuMW_d(R3iH z$gSA~$bzI#_P_zNggJU?lL0?fi1Fg@fM_8ukoi zT%jwU+VLr-R#->WOyV1q?Kw}S;YvrHMRr0M?r?#ksI_NiyMDiL1dc>|3M;j-QnxCk zLuuk4H6|;z!o3yd_R0{%&$45>cB}kG&#cu&wbuHxLH|mdC$x4AEIEW)$PtN*iQHA{ zN+i$ucg;j;Z8Mb}OO(@6vxDi}kezJQQ1p}VB`dZvd&MQB(;S#@P*H;m&ZBY|@Yf@; z=SGeTav7Ecpe3Ge3iu}aaxu5>=n^8ZM&SHT=0$M~m;5=xr=k2%SAB~8netkTFD>*0 z_lChIMWw>LS(JP#+fc?JU;7P`_^@ofNBsCDneeKd=e+oSfcA_S!o)^79Cs7oGZzC* zzLGy1pm@4b&wDzN#uKO1uEHekdE>$Na}uP`yc_W`EV5?M`67+iJ+iN&te#p3nDV=# z@>`&CLa#H7&KdTj=p~E!yq;hReAhvVycuINQb@% zd;LT1QxqXwM9gu{m2a_}^LivHHPRrID#a&(?Bq zvXo~8S*L};)LR!M^2x4&J&ZZCYokv(;A*BVMrebtir)TP*#l-CbfO4x!7042Gw+_U zA#=V@8l?s-kD;%7uWRUjxn|t(YB7df+#yI3bH(18D$zghv2oYJ{FqcnbiX4nkW9Z+ z7jdq4dHCI$cmr7aqJ;?>_I$p~wdKk0d8{s=oVPUc+b53#8F3g?CxS*6#soBTaC7l9 
z9l|J0%^T`u3zQ3zqvH(OUzcVRzq=fgscqC!>8NUG=IBeP9^OLtkd#XrQscA@dg6m` z;6KL(o%%ZxH~{08q=Gklq%>)|s`}9r4mF%5>`U(n4rvXk5N9qHyb%Tghkw$t zh`RI68^s{+Kp2J%cbW#&3`}6hX9aq2%;30q+|_m&V$@XO^mV&a;6@3EUW*9b-eE4r zmi~i1_R6?ZAWQz(y?=D%j+Tg{q!zwS9*YJv3eq=SV6YIWE|N=uodMDTYx-tq1F2S; zozo6AEN{6N`AYNHlg=0eD%P~SYO!m(GB-*gH_Vu72CPq-uW9Gy8pbTmFX3}R@J!Q8 z0$U6kNHo%O7OCs51Y%(6xhu)7=oMV_#E)?n8Hilkg`Cs*C}L%xczylNS}&fRDj9QqP=8nPZ{OWh$ojT`P##7Rz!^meYy6(w%}=$ z16y9Z;kASHL{uF}B{Y?WYMe9^-b-Sxu@0q4LCN7-eGZbkahakb3{!CTDtkfD0$w%T z+4Cy+e91H_Xv*>}XC-xk{bsLD>a@3^D6@Oy4X2%P&4|tXVW&g0vES~Q1xotTwv;2i9`O-kTQ^tn{CmS+?Al>!6&v*ZP z)_{T{TB7Z8_MhHcHs9@Y+uI?c`u49AiXORSs3L$BH?Hr>FgaSwuXgm8Y&L}B&vfmU z(8{*0cW9Gc=g#8F+pFPY?QL=0ZF+KMN|8|bl}rYm1$6R|8XxSb@!AY1BxmTcgeT&kqcnJz$2e+z=)SY;iJ6L_Q*dy)_Y=+Ca31^)pE$%3F+BM7Uo1 zdxsQ+H?yqd(kGV=^~_KRJ-Ej&vt5q`=@YG7P~+W6RQ9s%>zWJ(Wg~R9mJ4vaTDc&5 zife)bkD6gFP!u4;S99q%gfj~#lx|ok0UZ`Um%6_wf#i@YKjL#RV^tHurYY#`$L-e3 zY?G#BhnZ!>=Z^SsCq{ z41mO4E*_Jl1{S6{_3`onse4^bAD99ZS)zAlY@<8E0MD+k$ar&AT6Nc|@7Fd8c|GcU zjnzDss2?~&$a4bcMVp7b@oplw%B)IS4ONj!|0xgAEe_tR+y}>yoH)KSH;UHg$4vEl zcga|r)yDw?7Xyn`Ck!F3l{e2hv2QM1MILPWjpeNcf_uHC_?oB?>ytZsZQflan62`% zs!6g~aRf>_RN6M`o920)pP1i0LFMZ;oy-Y%qp62jd86Tqc9u!x-t^1{0oTb8iiB~E>cCRay-%WOrk2Rwdx!x; zBN!lx=Rva8(YVZXm?X$w@)}|Ec@i4tdi%Hj*sUx16P9wt6yykU?${WrFSGzU-EyIs zss~Q2_%5Cs-+^EUsH&DR-e=$kZJl0!(j95k%2P(uqF7WNKU&q^AYMX<+)cX`XOC7m z&Ek&0hvqezwfNdkMF(W&yiu)v$T6#~1#FzW(X1T(uItrv-<7o4ho74-dD=?MIZG1_wQ>G; ziV7htu#{SCN;wH>C{XVA0;)Wu>8Vybg^Psov(;s)ST;Ys6(}+Cqoj#g?nLhEaU5=C zzW}KPwd*4XsfDtP?uq6N8t833-|OgGLFtFw?&-U2OU+-D1`eGKlSpXNJO@)fk_r8& zyQ9>y2P0;g*ch@$*aY{pA#$+YUnWU4zO~Q-V};90KW(i79!d9ms)cKmhjf?ReJV&>~#*ARv1gGrgs6L`Q%ag%Qi@&sAOWsFJ<~DsY`02VQJyaF;VmHYWG*J*)f$lr& zJTOEn<_V1s0xp3cO#Z&uOT+GLk;u- zAfpkkQxge97|%o4>(z^cG^V#6js9~DX!r_^HeG7_X0uli^a3ELxPxaI6qhxLYstT~ z920T!k@P%oY_68S8dpn&_v&!7YZLSWAO+ImGaWg+H9|1QWX3g!u5(bXCrpzURxzx~`)70l{RNHVzu+6Pjjwt^P9AAo(n zWK)U*ovc8ioqWo+_CQ}~AopDb6R5o|fk(=aTv2nHgF=62HzwVLE(>)>N|hAW0@E14 
zi7qg4fBLnb_x}n48=PpfJ`Mmb?(y~z#*`_}@f{HvcB1ErK%S4GrJkft0!E)GfDaR7 zIiRv6%)^POqQ`aacOJ(+<pvylhiI~2d;44V;M1iBgJm^TL5?LIs)06#VMs&V zSO4rb!C!ykCSkFP%^dI{*&k@ARW4z=hcUgE+=QMm*FxvA|w-|u{DCwgxn zcr)DHCiSax>Iq7c4Vt$p6rfJee`kMQ_VqCEZIe#Reg^Lr*^YdY{b$63K6wy9U3+frC+$XH|0O~P+8YkDO{PCZX=8LV zY2ts$2Gqsq86pk>&=LM2myiNi!P0nvs82gLsBeahP6Xj{^$93r0;AJ4KLkVAt*o_d z@e0Ora*5cUwyI%1R@^&AC~6tF0Iug`)j%YoeM1s{^m1RhGVzw4M+1bf%8{FtndgOi zpOC+PmE1S(_uo3Qj`Y!qXUgnr>*Pipj`M%b`e5cPx2UbrjF_|lzWrj3qaB(=>OFL3 z;C>^5S8fpJ);)?d0)!~$JDdMuxJ>dd9@@^OhDRfhz__qHYMRi)IC*6@;c@8@-_XpD za^dd=llqDvhWB`ZDIneApo*K{0}vA$AReoF@tA>HnsdnP9*j#K^l;qb;iV5lV^&6E z?_P1b5FL5AaE$>^6l=)Qao&i~7Po11mMFJThUgh&lcJZiIF)dH=bl&TSqXDiuMf5Ih3@*@eUlbLk;u- zAgaY>f(noiRR#Kviy{oZ4aV~rRpW8XS2$Fp`_@jg&td7D^Qd#5F{YZLSWAP{LVnELrb#g$3glL=pqlofhAF&2n*>eP+z zs2oDVv$KB|B?Et@3Wjtu&!NyKvC>w~@UzDSdBz_puD^Lr`z;zn?xnxW$)RuolGh26~I73xI*{;bq3?rxs zp|)muA40i2e8&b?$PJpA=}zp2Jxjk;7Wxg~f9=+sBT5=8mp#1Se~&;k084hzv)5*B zM!DBY5;moU>iUWIl%0}C;2AX&nhhZZFh#JDKhemo5 zY`BULPf&lAz*{*Z*CI>Dki}qOF{QE90w2W$xO2?VLp1dH1AX(omJZu=PVqmH7PWK= zCu#6ulZbfhmxw3`ZI^DtLchbLD?`>DCaHZxn(Hy)JAYy6oP~Q<$qYtLNOsf+Xk(oj05^K=RrIa&LGy*9!!E{0Fsswd8>b8U)-NJJaXVR<1l9=em}` zhLEgpEs*9o7M1j))#sdgiebdI!r4}X$mR^^XXW!v;08H|ED&fynW zYRxNg`;%)2AzPOhr8#CDwZ|l*;-SG8{FH=&tUoz$;6DxUtf!@*?DTG)idS%A$1Rqa zHjkBHO2O}?x^y2P6?B_35>3o3c1*O&_e{6gz|EPkS_4SdJlLeq?- z`!KFk&X2L@k^=^{%zh>__(d4o5TJ3TML20b>rI1VsxMAO?g*xr{FVE^`FJWB=a@!I z5r)~!J;Y96eL7>zq<5=GRg}Hv4iijKFwB+PKD5(??HuL|t8|h7<47?6(meh+0z_gq Y6BuD6dz(Z?U#j6PLnD=60F&e#G!Mh-$p8QV diff --git a/test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin b/test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin new file mode 100644 index 0000000000000000000000000000000000000000..755c32011e4d8f737561ac85b8cb3357c052506c GIT binary patch literal 9649 zcmeI2Wl)?=w#Nq#3{G$o1_pi+QSf1T4v004maZ{?>U{jKaZs?EIE3hA^Br@fw< zIvlO-q7$Zro9b3SCEe6lVk`#4@won+YhA~-t}8XNi_&%6B$DA4Fje{{6^Y(Q6sljt 
zKD)((_ZCWZ_{J48pf9KmD`3UUh@h>4UHPYj|A~{Ij(!@F<3yua(x-3hd2$#Xp3RAx za~yJ4T=0eVxCn6%R3Ik<@?Z1*s!KgZv2Y_s3&7NT8}j&I19I8$JB*9(#xWI4U&y^a zj<;wM69PXd0;vS6u{&}QagG*_oi#(*Iu;OI?6v}0!uUK1j_r%RDO}l(k~%MC+ba;+ z_&ER=+`~Z0*Q4aEA$Fem>1&)pMk-!(M#6_$ZmlIn6opz5s>mvS5o%Z6bXXX|5&61Rkqj)YBeG(snyg6C7mr5V@fc zDh1v0^%>p|P#~TSVZWw!uTB9%*SvQjy5?L5vApiP$>LELqLTgsw(LMu5a^5K)sW+^ zF4BPDJ-J{0kgF+tr5AT(>hN>N(dTT{dt6W%@l_p~H-zI9NIE&}LarDfQ2BX&4hZ4_ zAsb5@o3MjI?o5Q*_Rp%{t$$6YN}gq7j+2S*dfG`ZV5QCR&aajW6nPG1YeJbW*XD#& zsg-nXDZ(3w(GpgjP~vc)Rnu=NKzY)Spkb4$_ME7#wT#}%*`405ZWkpOtOt65(I`74@Ae#NdB-@0T-PM_ z1hJ=DgD6;bbfd*h@Rk9I%bZQc&p@_1r7?vT^C;q`6xUL#c}~QEstNJ-jjcAI8roI% z)vRJj>Q`rirqj!oE38mID+kDNNc8nH7l=9~XfCXI5@IiXK%%h5+tM&Iks)a|aarso zU;=$V(6Kv+272Q9pgJYAPO9^a+W4JZZ>~lliyagd>yu(qkh(D+oi-@M8EnpK^Wo*~ zH{Gl5f(tyJSb)K1XN?62V}bUe%e7gZ%(8%6I*b3nCn%f1roIEu@sm4KG5PvzE)ya@D{jW z*P&=g-c6E4LD|t>ivyC4_AX9Uz*Mx@RZm2HGr25nnFQVXG>eglZP6V6!PXFzz8AcH z)nqoMarLRCwgh@v1FH}PKS1p}^M8mcG_~#kVw+giu~ns1x^(VipdKm&y85fyfvqmM zc@Ur-A{zxRkVpa8`G+~wGO}hW37v~3A}e7Ko^bnl*_=zp(t&&}D|6aIq@X04di#jSf(QF7hJp0@WZB%JbwBE@ zylp_xNbNBNQHaIXbYQ@3kig=*@MA6|2~9$ZnM<770sAK)1sI~J#$WQFmECB3K@cT3 z{03ouK*RdnwX|}TO{c*Kq^tQ*{W)@R3=0gj${s~7d8>=kl(MR24R~T4Z_jCl0TRrm zFfZ<@S*16$rh?wc16IOwFE{mJSu!}DGJPx$@j$Pj0xxfUU1_uqMXva015KuZHGryv z{@B-4D@e&h?ZhCl&_J<4F%-31_T#Vn+&MRwehDwyg)?m;lITcW+E5KZe#53`ulBkh zwlKAbDjCEBC(q_d0;d@5DI(log}R<=gA6wJzv)ip@nNz0OF-izwb;n-K#MJo{JozU zn6(_deS22?`poKwev`6%n*6Uj?|)5U zA3L^@Dp8gu4-3Y~I8UTr5-g5U7-vGa|RM=oLU-W}WDkDhv3Gt6+o0KAPW&ay|4{1>wf<1+KT)fX1mW{00lE0a?W#jostc~CvRvT$ z_JcLAorGtDG@IHWr`I%&r*%rh!eA1yn0w2lPCn`ZKEQg`7~5dRX`VySzPmL%;9`awq+dztl>M@)jP&7Z$d;O zUhTXT0Rcc``Z5$AR2>(~HICeut4b?+w;@fI$=-{<0(ts70K^|4qlUp`B=htn5C1JY|N3-=z0Hv6qMf)i-GQ+@cDb^cfpFx zcfWx6*MGkgdIoMk{*8!2Nk@%DfH$IODQ5>jVov1Z4>+yv0RgJ$aC!E;vtT9=^sjaQ z5bh7*{t)gz5w88}Jy(>)j2fHaX2VUB)-e{R6`-VI^uPe4TxE&ROc0d+&0pGT5~`}z zl$~Zxwl^iz$j=*Sv;To6U=9VG7H9zK6vizp#8!2;)=<`iIm5W)y^^2x^M3oWnD;%& z^az9s8Yy%%Tl!o-(6UU2m_Cga0bH2TZ{F~v`mT57ZLcT30BN^Prox1b;saz{uDYOj 
zipeDQP5ffB>qe@~ms%@t$Ux7X)7@-xVoMNgqp!MAj5{-U_jzPcp|aSP0(ujc3{)V( zN)d^LN3Gzza7OESr)!OtoY0u}+YR_4avHsENi|v^K((-1jcyZ|DVt6HZSWE!CJ^Et zfjQLmImF$pf8HK*`ybr;djRxz@xv9Li9K*JsVPyTjILg#PEPGKmb1vo1pri{Z=auN z&BvWW6V1%dcrqAm@H@$7p1Q2|({=IUcay>C)Pyp>Q8g|(8!V}S=-WEYUDFSN{422XpOOj4Q*+iI@r&ZUiIRXwJE251(U_L0_2J^h2!85bCbK^wHe(3nr;}uOIvdID4;08dToOPK@ zm=HyQ;!;E8WqC;2Ndh&-V6zR)WteV8dhF?9e(GH+PZ!#pK|S0aFo6jTD9G}5(t(mp z`CugH%q}izkeM2e^2%eiUudl%BhQdiK3TVPrP9|JzTzb5c4gjX)zgTg+>r`x45P!V zyx~exNKq*qYfvTd@KIVLR9)Mh*L*xSM}Viz!N%Ur8s$0^SkD}2x=1iw8DPbtkD|11 zimhZ2i{)OW^a7T<2Y2$XwGQLGqkQS8r;QQXH#e7By#YpAE3nzbMOUCBkm`YEiB?sxTr%qVmv^n4aDXYi74GEo(#^O(RXRHad*5 zg4gi9SGFCg;u;rZZEMZ~`;wdFJCD<3G^_Fs^c@)X0NO=}M(MZenrUCD1mKMb{7R=n zy+X}a86FdvCuUIxIvQw~CsfB@Amc*Y(iT!MC#-KIj$9 z_X3JR`LW8#oYlwa_s^#Z%%4|k=_a(>qx-#lS z6q*PTtHZ3Q6ttl>y4&(7tSC7a$L;QMAuS_v5XISG>>xkqlXN83ilKIqKrs5&SKoob z$_r z^)nKiN zo=v`=NH%t>edww1;qeta?sX4CdZG>g5W8m9et})(veL(%h-d>}!R0s72Q~eIu&}mM zUpNP;|4Wj3-|DLO9-wz&Js(+J8O(k0$+DrWucREu$i%@ES+3Md!sBPl z-ya$ZRwjYzSm^DEbM`crQ_>k_q9R|nN^(0K1LbKaHfbu zRl6-vp1oG@oPW;nl|0QcjuNcqf=hLnm)+2(*e-T9isatLM{cFmz^-nGH2r{~C1EYr zdf9v%dtRb+LF_1;{*hL%tj|c}EOp{`%($R`J;B=DqeUh|(e$z;3wX>mby{@mQHjw@ zv;p(Jr)q?yuEmidTAvpSQ;B8f561A}-o&=eR;Lt-gMl-iR!fDU?d@F>NaEAZ3}2*p z>{tS&Y)DeOwV#KA*R<=C7B?uN=TGr>lE0Gmv%%Qm!)A+mO%F^dDjUXr0YU zkF?!#(*NL^qJBG;6h_Rra)Y9?5zcX6sMtnE^{s%Vma_FGY#}Zl=!v(~IQrBD`QkH} zUYBqCqe znGTWK7|Tzaj2U|urKW{Aq8x*$|gE3+Fs?Oo-*N0 z|Mr|wwlbt1=A$adL?3!vot~~D#6ri$%QYJzygMi6cZ`?#i9$jvjLKHVTqH?08S_Vu zG^gv=UAZi;jTY(TUZQ3;|11lxkzj6jZ6(u?&;e6PQz=8AZ8^#Q|MormOPl;IPyf4g zdqsaa{={ia@i-H~IpT3?+Q+c1QIpLWQTNj#=YTvP)Q zq%Ohd_+7siF1bWsDl;URryD+9fllLR9QLT24EbYJ%uO?Pe;J>5%_Xz&>$0(hCq4a$ z8)@fu-~5dlm;S3;wWDk^4(fWPAsyfo_1I;6>Qo)}kr^)Gwlx(~mF2M#peo+10{p(^ zRMZ*$a%7>b`_Pl^m5to8$~%^g!yS1Dg%H_r-u^o#sv(FnW`p{6>CAX~SwPtN<=xrn z%xn5P*>981CF0WZOii(SE~)@I9%WdYPX3bxRU&zM^jS#8;{X9SWBAiZ-4v2j!&s&}i*;Svm zFdn;g#zH@+^m`Opx9X78x#Y_rOOuN}GTKArdGd{Vm$ws28Hwjep;ve;McQ#CShXdd zMiH|$RrrYpX-ZY1-<tVcN|dQ+g^{A>1zil? 
z2{ZL?JyB%=#w4txwc3vWWjKaRy~nNgPSK0$xfhwUUx+X{Ad_5yb-ca1H2?WvO{d)JlBux)G6&Fiow3!o-E~#B5t=r!NoV^*e)3M%#sUH}kY(he7rdMBLd$LJIsYOU!M5Ht>|%MHuOQ#~kVx<;pE#4}>N zA)E_@;F>ag%GOHGkj&y=J&Gh*k_(J$nd~~nB%_d1X7L9kFYIDOkINJW-7*;Pp$PG9 ziR6ut>WE0z(Y|wiPVjIsR_yo+e`^o>%Ms&jWHxL?F^RD8S@O#+1Vk{_J~vgKt?7Jm zK|(IGh@Buql7RCvDOpaCLTQZx>?$-f7idC;Jt7vJcX*?T_o^^y4cWf-waqLoJac0U zY+PJ_aZGPK3!1sHx{_BRIW*=sL9>r`(Z+OA)~H~6M3;zBv?=Um!MtA+k#M|&%^BRv zka8kJAKM30oB$F7>e#zPPfc|rEb~aj%lj-~0TQR!Ar((B=k zLm{(enaFs`Ty$gb4*&C(lhoru{qemD?O;y5{Aj;fW5Kdv%XRy0ZW{v3sGPN7k`ywI z9iZ^%-jwLZ{ZVS`d(G3h%!h(VlEZiE26Jn=!`~I=J~tE{hGEv$6_+WP+5aF3rtiZz zEsR}h5%H0u0^oFbpx0oOmJ*G4`$bL{b7B!Q%2TPaVTA@%Un6kH;Ei)51}R!vDUCD* zF4?>1!d4WI+irPOh8ZUdzbV+*t#@zMfEm{~6_LylnC0)c_%eK&>_w#1WYGTgxzeu) S<8^*Kf>!4E`4(MeJ1o literal 0 HcmV?d00001 diff --git a/test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin b/test_fixtures/masp_proofs/F3FE67606FCCCE54C3BCF643F0C7F5019CA3DF4CB89D10CB4E38DA9CDE3A9A0A.bin deleted file mode 100644 index b10162bc6b0288fcfa3d1721fd716ec3c2a776b3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13799 zcmeHuRcu|$vaK;@W?uF(Gcz-@moa8$cI?=WnK@==W@d~jj+rs$n3?hJ{ocvd{kYQp zIOpeT?~+=j*>kE&Jv1~_v%4A`3=9nGkMY~U(N~(1Lu5t3>X(!Ya)gSbw4caDqVOST z_xK?5NYpkJ!TxGHjbsEgFqB`y<_6Y<@zqx?aQu_P9&w~J)FW46~yn5fBH&q zA$a8I$r2z5*PbjY&=5~L>J{exz$BrTdIQfVbGl0t3mqX=0fCUW5wWjeBGK8(xxZ;F zU&l%u8L_8?3_Ynxl;!CDbPid%ucF0A+5QoLJADU+5H?6jz%{7kt08=sTfLoVn3I7L zpOYNh%&xVj04d)rNElPkEl6S>gf#&+{vJye|0VKCACC<4s<6+7lTCN?~B6vHey6(wQU=$vb>o;PI5O*kPp>Hj&UZ_U`QCzV5UABtspsb#4Ny z%YH!O%<`y!K>NJ4p`Jw~M)u8#1noR`5W^D^T`Bm5YuM<06n}zw4AFqZt1%rxamV*W zT-TEA{41x|Nt#I9y^vS{k3Ay-6acUx_BiHzq6^XnwT;I!e34Kdd_YeLw2{b3DXwo zX%eO42blUPcx<#;Lj0TA05Nxp^c|4%HQKD=^=cIZhYAgCSkTE^E|7^xiuLoK@Bu}V zM2eBKs*bEs-OW_K%K81i9-j9Vsjm_fwH-x$ZQuYt)n_zT88ie5^DrOJGAn+*{Uqn2 zHUujeh!6HD=U93GIhc0oFDFJu4o-M4_%M&5&kw#;#cAE zPt5M(>I8tL$j(`DN6B++`5n%#wuPA-F^idpavvU3#n92dlkd;~1`pWA^oSOT{#z20 
z5SgJujUZYFMJV`ch16hm6E1RXK)5T=lFlv`^zuvh@mt9~3dhe9ZhH+yZxFt5DaSy8 zIY6d=>HPYzwU-5D9ZCAl{T3#`d zFr;j`M%~h9)2Q}NSh9cgk&~yNqfR-dK%0J2xr&U ztuTUptlxifo7VNK-v(ep z@y+cz0qAG8bnI0Llpb7%DM`jk5j+A^9e_6X>>Lw-V+=caHh^FW(Dlty(K@DSF7*Q& zX>?xl_lD$`^`Dk3Nvx^YyBYw`7oO47}*jkKFfk_MioBn>zn6E zA$dy;(_*3kcqXGU(k5~{>gieDiKpfEX~!IY8DNydX!d8hw}}-A0xX>0XX=8>ho+Hw zDO8p>vK@a_$#RRO?R>L1vm zPe1?{R2jg=K>+PsB|gL=KNX*Y$6!@D~UA@p>WJj?Y$9F^S&bL)h>v zlMsDbAExg1Du~=8lYIM*4lbFztvUzf{X z3jd8&Sa$1r~@9*!6 zrybbCtXgt9Oga={?rmW}V`4n;R=;5vdC9Vp0Wkv8KLPo`2Ys|Z znXMQ$6-vKketyygc&_zULLYEjg+6~SW}6h%NGarId{JZ#wu10{S!?+W2Dm&At`;_W z6O3w*HRHc}M=5YMQ_@snhnFCDLZ!$+VFm1yr>4LZYd#LnJg@A}@F!1R4>M0B8L79Z*9;#|JBP;t-SOSSH`Flt=Hdrc@yFrm#4O zFaeu&lCNuzOE!VKENNk+tmr}D=yc0kB%kiFAZLdmH5K9hETaA}=syPdU;RZ}70FS` zuXOUMjPP*2j(B_=EHNmxHq}LhGXjto-)7tymn@y*GSIt4LKOX#h=EbH(fpZLL%Z+v zBUCLBX^m>$ErA{RfQrny-OK7uh8F^q<)lH-U*&;FXWc3`i8^qjF1TRk_sTqJSDb&l z&>-P`CH!R8Y0eyb_h*)W>~D*}f0EQRC2KRoAR_>W!^z?k*s>Mpdhh376M6f7U#E2Lo&smUel2K#QHnY4_YbLmL;d&k|4CBA zyOK=oe~F1qBbUO~oGVyM1I<@J3Y4HaE+XswvBM_+8{vPO)X|FiTT_u@1xrwxJ#d>z zUQ7veTECJdH)$zo#=O&ZHvS>?Z>ayC{y$0T`K4p<3e;4kBlOM;i^LfiA8WG(0>K%} z9^jQxrd#6qZ-oDCQiH&aJE$+1P_YWuPPo?!UtH95X(AE)xL|zO9Jc8IPydkmH`ISm z|CglR{4#S2EXOg!XqVE}tJ5iHo`-i8yuJqmROmYvr`U3_r<2ChFj8HAk2n0wn%d4= z;ZKyyR}W0T&u1i8`A@2`$=Jb51jfI#Xr4&dxR!cxVbO2=RUQcE9|-bisq05YxyB}F z93QI^t&ojStCt{CjuNJM16tAkA@$#r%)eLt=g}@aRS9LW+xy|4bN~MU8KPeqG6Xd=*S)?Kbkmq{z+ z>US6XHOE+~oX2-F`a{G%4B;9_T$LINFQ)j>VEKW91^0zU1xC-qt`q2`8YQ4#=!evZ z(R~x!S3T$xk1x2-V!p=?Ca%cZl1mLIuE0}XX#{S8yx`yGlFQL?)Mk?}eTf*>+J;$Y zYwwy*rlI$1{fq#*JnaI02r;@}%{dJ0Yy0-ivCzB*o&silnNXgO0r00;GFfgt#w zJ0_wl$@dR#*{gN8j;GHCgYR!N&Pg0xDTu&ze6@BHHAImgsyjS)o;m6ohRkR^4!B|x zdf#EgV(4h;$m~zew<;WDq!p&v8ED8YDZBBJjFQ8BtRPf@D0U@A#%i&x9R6&AVzs6I zmZ#9hpVOXre+62I(q%oY`|NbJJK!}--FL@mnlf)W8upa1)Ump#1&=Q1kaU+<&9mSr zq#i&FhTdij(9L>_Sx^oHa21Z`-;DV&1y#r-vl9zCrh z594)jo3zDbJzn;MB~6Gz?5%eR){-GBiwO=vuq@~4NYqlZkH%^V1sZ0)F} zY-Oe`lhaicJ@G&4F(DpP9!R~K&yQ0bed=F~u0!M3KG8eR(7bx}B9z&~N%#~_0%wuy 
z5sd>KGiIPQV1n_&7uc)iANlxMbtp$1zLEhC#w~S$e^Qrjt<2I#;nCJT=j%k+X3K!R zRqPvcr5W5T{iy3|N3O{NOhXjcQ3pl1cvdT}4Q`EYK|kB{QE`Xw{_I%~@=vmlRcJkA zsLvj((Sfl#Ei(}6<)8GZbOSWf_)(SF1wein>UG+D47*&rEPnZXyqNWx`lu3xE5zP_kd?L&O$Tqx~J71lv6c1|8Rmi?(~OPWF6Votuyf zbb&yfVwRrFvFD|JA2wFGzP;TblY>qe_6iX$7{_fS9C96jS((TXI}6qGtw&HpXl9zQ?P8Hfdzwx#L0b)M8LY5W_rRl?2JZHNxTl2C%J?82N8Tipw2!r?wD{T5-WgyZB^wBWXp%<30tPSBBY$~Pg+s{u7N?R67_w>#zC9(xGHIP5nmF7Qb z#$eRi7ljjdPuttqxj%@jOelm;Djqi!05VLjKn#ASTjtX@3+Q;Ytg>wkHD30@>#3?( z#jU;E9vq0P*IHjb+DE9gU?*G0U2AQO2Vq1Mw+@{?VyncYeQ#^sX^=r*KogbXmF71~ zR0fZTX@|9d{zY+g zEhr{hH#WX82aRhKIw=PV&;2!hbF;o<{yt0EDaES+aSg$(kMU0i&ZZC^6InBn<+6 z>*bTy)Gic{I}xbzkWL7_OL(f5$p1p&EDTZ&Z$~sZ#!PM7Ae}2c*pje!D@d52W5H7S zP;uBukkki96AjKPXA1>)|`Fgj!UfVm7=WuEno&WnE%C{e%S%ByqpgtNH-<(QbY*d-f2i*y!qp~d z7iT+FcqCYpWA_hx3rWpHh?sX~$4R;vwJyf5wjET-J&+}@@jKqDT2quO7Fh@Vpd%?e z!LKD*-OrXZ=65MlcBKm(ek|$>7v+N{W~8=1`?LW4V8XVAs=p514B;&7(@BG4hhiV3 z-x^tQwZY$Tm8doSYT!at|(UcPdCUxfSY}- zoPK#wBzjO?`Gi+N(n!N>Y~m`;(-HlboN$7S3W^KRu$x8~4;yt4fp6z&JcLcvWP$Fb zL+iLB-&Do~N6__b$E?V_^|XP42n*f3JHeDm>Gu(bkMGJhQWOPzy#m7skAaAdb%p~b z(nE5N0z)iICG?c)zD@Y2r@1CN4u0nXqHHiiXTziIal`gBCnL3mjR_vGVNxJEziuc^ zq*Jy<^DVN{s8i>*Z$HnBAxJNnoku&YahR_O6f(EU#{D?Z=cBpmGO0mgdwR06(py5p zB4+Cdz2nGp4|_i3RP?VHgRi0wJkTTf|ZxZ`2xXg_0dQd9XhHQD7#N zm$Y4mFRy$d%!_k03EjKcBo=Y1YTsZ#2-<{h;`L z!hE~oE&NSfT3_gmn;Y+KTUU^a+)nHuR3dp$Q*0!nP0DbtMn=g>hp5V*+2o)FI0i`j zere4@7xVN?S-;Ju)?|Ev7VL9>@d4#Jpno6^z4aZvm;1z)QffJjRy5Gj922s>J3n1S zj>oNU=X(tt9%T7-Z_r|Q#0hCW8^*7k1stsNwNIw_ar&J38)F^!S{C20lGODETHqS2 z0Gm3}KYid`XU+Y7aGrU4#P?tA1rb%OJimH81#mIf7I2(yP(OnY;>P+m(xa>(*M*b! z$iA^HVAYvp4b5v|%Vi%Hx*lSUb0T`c7ful$(0FPrv%>U0jnmhFAu79D{Zfd_lLMma znysD-9$n_yq4*YMo<=wc#^;nkJr4{~jBvET>swODz!#&C95tTQRwVD&nfvb@(6=JO zVqHPZMp!sjk^)2IlB{k$*K? 
zIb)?l!}e?M5d&{jv4K6{g<~uk@%%yJNnW8a#bdGxSKTH!3z>0Xbl|!gI*&bFZbq}w z*n2&Pn#`H|YKol}T;U0MSuqdZ$J5Bb*$B(_MKpMZ&dzNY|4}1LySuJUQ#QcbFBu|c z3i;2O3dP-nBN_cxjV`{Hd!div(Ge4Btikz2Q|xgp)tVm{ejE;~h5ra!;l`?huS?Hh z4;h@VHxkbw&fogHW+(yNtb5W0w0qWp^ejYDAU+r zG_ZhX?$qIP>7VV`{^7 zf09%E24IFINP@AC^o;T#w-Hd>>9wG+rfZ3JMR_zx-HKFjJsenPjHsN_%t=mfTQ^Mo zV%^cBw)`rQ7-z?~^QF-?foUKMlseD3i-rUH?kNWkef2fp9rs+=KH&2)Vam0l8PB=6 z52yfi334jcvG=x9)K4+Lf4$>u&t;6?%OA$RPh@_s)NnaVzAUY~xpzgQ&QTY-oIU0a zo8o`=E`w-iXJJTpCVDl@N)+8qp2_VLlXhrnr02l|$SzJ|CEpWjvK+j%xx;M{VqOKgOLKi1at ziU{&N|JhcO=k01(xqd9Gehly`ntrN3DkAyoP0Hs12p0m=H^co^)xha6%6P_{-9W(> zocBxaC}nZijCVC{Ux_4lUN45*atEo%!5|_7&K@IdU_17Ty|}dyLt_XM558#NNBQkZ zzue%h!n>T=mkS)%EIH0#)A(;L+h-2Dj>s%Ko6SsC1xL+1gc3D3{y~f4DfaK$I)v!) zC>xl=?)-?h_wfP#Mu9!yDAzLW6xyOYI((HS>7;G2!x+yeZ(%FtBBM%RD_Q2SGy>hv zjP-jhlOpublGyjE$6z-;CyND)ZWNgEXmU$jU@Hu>2_*C-u5-<}4KJ-iOLP#2GQD(y z<%!;ekKUuxx$Bu^!RZw`1imHD_KQij^i^-7xAhbv*olj3=ZGZz_Z{*EVcAV$?a56J_ zAzrRT5XvX$^tFFN*Go$`4c%^pp=TurZ+Z#QmaIr+m}q4u1Qjx>;M-G7r9tv`UUs5x zQB?HoMl+RVC5`VG(KliSkdE(ZHXqeEmh98UA>HvcykBQPPX#evSV5ycA0(`ly&aeROlh-!?Z}H5)-QjgS*$+X##Ta8tOH1YV!;cb9Xg92#v$!9;1%BsF*KPnw35 z5I)GTj`9?!=*sRS0Jgyj8H_83h|0`4Sk zzHz=TD%-0ga1XRSFrT8OsnVOlruNl(eRXlk!NV~d;}#lgMm3i^tULlSGxx4^A~eh- z$C2tdB)}AXJHa?aA@sLbFgaHv4h9 zcV6^sjqUD%mC{F)0jCsvt$f;hiDL!R$EE}{-{X6sp(>Gt6;bJkhKZF2RJK8`8%E=2 zw4`j#N1x#cB8R9%Sb^uKx-Wtk#)idwrdF}W!Kokmvc$LxWh|)Q@0%e;O^FJ>_6Be6 zAcKhnp1?$u;b(2TTtA3^X#bgm{}l0*SAYXk){SFxi+N>A!XLE-36!7vncs=Xa?KdO z?H&=`y6GSxK;UwzCiDx=FBjf1YycGUZRZN|9>~u|oSuK&noW5>l7OIeH)as^6m9_d z%#i5gzLg$&Fm$YwX?gd|jcZ&kl6kL#=#g@a>a z!ahkNqJVlARvjZpUBsIkf~PNaMD~oY#wE)HNdZcL)k!NP)HT2E3DIXX`5tLu7UIVu z^aQ7Szo4U|bI&*4TsAx_LxERz`zwWb{1PG{m-Yu0QCgb=j`EW?&GYrATrK^Xwxx|g zvU>L~w&w}~TI!Q@akhd&_?3SYd3)RX*EHOCHo9w%Q}A3=Xov|9-0YUEEv4m19sv^0 z^$mArxD8tTzowzu76jmkBNnI2X{R!zDxHJ86YVMI^F7WCHNzq7lC-~iCB|EgA*HuG zG08QDD%>w8G}w4R5is8Gjfa^7`1_?-D8~jMRFLF#J3wG;(AE9n4I&ncK*aO)l1sAA z&s5dzZDiKurOchRKjY_!AK?Koj5EiwsWFm7b8(U;xO$T$}Oe2Bj1#A@}1Cz?W}H+ii*#Ud%P 
zNl@?pWsooYS{rWa{2XWiq6QmT`cbX;nOXQ z#7nl29_Hc73G;4TDKfOydOhIB=om{Y{mD^wAEOA~q{sWe`+4|&zd}Ptzf5y@W>1CS z(V}l@m#T;NSHY!h<7gw%^)E#hT$>7qQ6t^IsI+*UIY(eyvGqmNxpubkb&YEb`6y8q|m=5 ze{M_)B)jp^h?|0H(D)=Orsqx^2G1_!Q_j$2a^>pAv8RKdI|n-K6h?HjW1mMtO%`e_ zLT{n;La{Hl_4%rdYU-1?9ONE0+0GH*2I2W!q9HnJb1v3p0J>lc?bua%!1c=kZ<<=I zX?EvI-iUAz?@**)?LsfKm6()O!#v9ThTh#(E`6DLQ}Og6PXkR;Nh-7UC37~;H@^Fe ziA-3)q3&*)Ke0^i7KR=uQ2LWtY)XqflKc@ld?{Sk5R_A*SoRKiEzbFUXk7BKe4udo zN}Jg3`pupP9S*BgYFVOxb}<(_N;YfL8W%ADDnwu8ht`LyxaiEj3PhO1I>zP5$x1A# zhG22bYy?xw-6zl>W0H1B?d}Uy*Rm=h@6|?Pzh;Xl2}`&4O^TN=tcw19lJ`-1G>Bv8Qy) zws}0L{h3Q@8oi6JRfjJy@0qsi#}j)0IV6qnFOmv20%UbFIM)3jt@T_@;1QIFWNuX; zbAju?KEj1F_A^`bdms||0g(BbGET^A^N4vyMiEP0!0_pja{x-(h?NVCI7%HSuD5hU z%*s>{anxGI*qM14OpTOn7FP-82n{hi8dN-+r-(Y*)o6k}@sL4w3S;?TM!8zZ)eynj zT9n`G;UX;~p3ri2%5%kIL@$21_d^}LC!?_m_J3ZS`k&3CzjxpNY}NX0zu!atYXa(D L{&$QI|9bu}n12XU diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index ecd1b34465..592130e574 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -6,9 +6,6 @@ use namada::types::io::DefaultIo; use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; -use namada_core::types::address::{btc, eth, masp_rewards}; -use namada_core::types::token; -use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use test_log::test; use super::setup; @@ -22,10 +19,6 @@ use crate::e2e::setup::constants::{ /// for leaving their assets in the pool for varying periods of time. #[test] fn masp_incentives() -> Result<()> { - // The number of decimal places used by BTC amounts. - const BTC_DENOMINATION: u8 = 8; - // The number of decimal places used by ETH amounts. - const ETH_DENOMINATION: u8 = 18; // This address doesn't matter for tests. But an argument is required. 
let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node @@ -35,7 +28,7 @@ fn masp_incentives() -> Result<()> { // not invalidated. let mut node = setup::setup()?; // Wait till epoch boundary - let ep0 = node.next_epoch(); + node.next_epoch(); // Send 20 BTC from Albert to PA run( &node, @@ -94,10 +87,8 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("No shielded nam balance found")); - let masp_rewards = masp_rewards(); - // Wait till epoch boundary - let ep1 = node.next_epoch(); + node.next_epoch(); // Assert BTC balance at VK(A) is 20 let captured = CapturedOutput::of(|| { @@ -118,9 +109,6 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("btc: 20")); - let amt20 = token::Amount::from_uint(20, BTC_DENOMINATION).unwrap(); - let amt10 = token::Amount::from_uint(10, ETH_DENOMINATION).unwrap(); - // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_1-epoch_0) let captured = CapturedOutput::of(|| { run( @@ -138,13 +126,8 @@ fn masp_incentives() -> Result<()> { ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep1.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.74")); // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_1-epoch_0) let captured = CapturedOutput::of(|| { @@ -162,16 +145,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep1.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.74")); // Wait till epoch boundary - let ep2 = node.next_epoch(); + 
node.next_epoch(); // Assert BTC balance at VK(A) is 20 let captured = CapturedOutput::of(|| { @@ -208,13 +186,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 90.96")); // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_2-epoch_0) let captured = CapturedOutput::of(|| { @@ -232,16 +205,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 90.96")); // Wait till epoch boundary - let ep3 = node.next_epoch(); + node.next_epoch(); // Send 10 ETH from Albert to PA(B) run( @@ -302,7 +270,7 @@ fn masp_incentives() -> Result<()> { assert!(captured.contains("No shielded nam balance found")); // Wait till epoch boundary - let ep4 = node.next_epoch(); + node.next_epoch(); // Assert ETH balance at VK(B) is 10 let captured = CapturedOutput::of(|| { @@ -339,13 +307,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep4.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.71432")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_4-epoch_0)+10*ETH_reward*(epoch_4-epoch_3) @@ -364,17 +327,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep4.0 - ep0.0)) - + ((amt10 * 
masp_rewards[ð()]).0 * (ep4.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 386.46336")); // Wait till epoch boundary - let ep5 = node.next_epoch(); + node.next_epoch(); // Send 10 ETH from SK(B) to Christel run( @@ -417,7 +374,7 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("No shielded eth balance found")); - let _ep = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at VK(B) is 10*ETH_reward*(ep-epoch_3) let captured = CapturedOutput::of(|| { @@ -435,15 +392,10 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 86.60024")); - let ep = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_5-epoch_0)+10*ETH_reward*(epoch_5-epoch_3) let captured = CapturedOutput::of(|| { @@ -461,17 +413,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 1180.41525")); // Wait till epoch boundary - let ep6 = node.next_epoch(); + node.next_epoch(); // Send 20 BTC from SK(A) to Christel run( @@ -530,13 +476,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0); - let denominated = 
DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1407.16324")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_6-epoch_0)+20*ETH_reward*(epoch_5-epoch_3) @@ -555,17 +496,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1520.37191")); // Wait till epoch boundary - let _ep7 = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_6-epoch_0) let captured = CapturedOutput::of(|| { @@ -583,13 +518,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 1573.18")); // Assert NAM balance at VK(B) is 10*ETH_reward*(epoch_5-epoch_3) let captured = CapturedOutput::of(|| { @@ -607,13 +537,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 126.565")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_6-epoch_0)+10*ETH_reward*(epoch_5-epoch_3) @@ -632,18 +557,12 @@ fn masp_incentives() -> Result<()> { ], ) }); - let 
amt = ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1699.745")); // Wait till epoch boundary to prevent conversion expiry during transaction // construction - let _ep8 = node.next_epoch(); + node.next_epoch(); // Send 10*ETH_reward*(epoch_5-epoch_3) NAM from SK(B) to Christel run( @@ -658,8 +577,7 @@ fn masp_incentives() -> Result<()> { "--token", NAM, "--amount", - &((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)) - .to_string_native(), + "141.49967", "--signing-keys", BERTHA, "--node", @@ -669,7 +587,7 @@ fn masp_incentives() -> Result<()> { node.assert_success(); // Wait till epoch boundary - let _ep9 = node.next_epoch(); + node.next_epoch(); // Send 20*BTC_reward*(epoch_6-epoch_0) NAM from SK(A) to Bertha run( @@ -684,8 +602,7 @@ fn masp_incentives() -> Result<()> { "--token", NAM, "--amount", - &((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - .to_string_native(), + "1980.356", "--signing-keys", ALBERT, "--node", diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 5dbe91c1e4..439d62e184 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3101,7 +3101,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "borsh 0.9.4", "chacha20 0.9.1", @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = 
"git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "aes 0.7.5", "bip0039", @@ -3145,7 +3145,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "bellman", "blake2b_simd", diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 81f07ba049..2022e64ca0 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -43,7 +43,7 @@ once_cell = {version = "1.8.0", optional = true} wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", optional = true } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "1345b463e8fa3b3a6fa13e4a43fb1c410690ad62", optional = true } ripemd = "0.1" [dev-dependencies] diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 8e3bc2bb20..c8cafdda1c 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -3101,7 +3101,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "borsh 0.9.4", "chacha20 0.9.1", @@ -3114,7 +3114,7 @@ 
dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "aes 0.7.5", "bip0039", @@ -3145,7 +3145,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=1345b463e8fa3b3a6fa13e4a43fb1c410690ad62#1345b463e8fa3b3a6fa13e4a43fb1c410690ad62" dependencies = [ "bellman", "blake2b_simd", From d4da00bc8ba3b8cf6341f005d71da887542888c3 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 18 Oct 2023 19:52:24 +0200 Subject: [PATCH 144/161] Fixes pgf inflation --- apps/src/lib/node/ledger/shell/finalize_block.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..cc14275c4d 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -828,13 +828,16 @@ where pgf_parameters.pgf_inflation_rate / Dec::from(epochs_per_year); let pgf_inflation = Dec::from(total_tokens) * pgf_pd_rate; + let stewards = pgf::get_stewards(&self.wl_storage)?; let pgf_stewards_pd_rate = pgf_parameters.stewards_inflation_rate / Dec::from(epochs_per_year); let pgf_steward_inflation = Dec::from(total_tokens) * pgf_stewards_pd_rate; + let total_pgf_stewards_inflation = + pgf_steward_inflation * Dec::from(stewards.len()); let pgf_inflation_amount = - token::Amount::from(pgf_inflation + pgf_steward_inflation); + token::Amount::from(pgf_inflation + total_pgf_stewards_inflation); credit_tokens( &mut self.wl_storage, @@ 
-877,18 +880,9 @@ where } // Pgf steward inflation - let stewards = pgf::get_stewards(&self.wl_storage)?; - - let pgf_steward_reward = match stewards.len() { - 0 => Dec::zero(), - _ => pgf_steward_inflation - .trunc_div(&Dec::from(stewards.len())) - .unwrap_or_default(), - }; - for steward in stewards { for (address, percentage) in steward.reward_distribution { - let pgf_steward_reward = pgf_steward_reward + let pgf_steward_reward = pgf_steward_inflation .checked_mul(&percentage) .unwrap_or_default(); let reward_amount = token::Amount::from(pgf_steward_reward); From ab2076658eca0cd4d2ec817745c296d202260b9d Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 18 Oct 2023 13:57:49 -0400 Subject: [PATCH 145/161] make token amounts in `RewardsController` of `Uint` type --- .../lib/node/ledger/shell/finalize_block.rs | 13 +- core/src/ledger/inflation.rs | 122 ++++++------------ core/src/ledger/storage/masp_conversions.rs | 11 +- 3 files changed, 54 insertions(+), 92 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 525ed86ec4..f36cd4b7e7 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -641,7 +641,7 @@ where .read_storage_key(¶ms_storage::get_pos_inflation_amount_key()) .expect("PoS inflation amount should exist in storage"); // Read from PoS storage - let total_tokens = self + let total_tokens: token::Amount = self .read_storage_key(&token::minted_balance_key( &staking_token_address(&self.wl_storage), )) @@ -653,13 +653,13 @@ where // Run rewards PD controller let pos_controller = inflation::RewardsController { - locked_tokens: pos_locked_supply, - total_tokens, - total_native_tokens: total_tokens, + locked_tokens: pos_locked_supply.raw_amount(), + total_tokens: total_tokens.raw_amount(), + total_native_tokens: total_tokens.raw_amount(), locked_ratio_target: pos_locked_ratio_target, locked_ratio_last: 
pos_last_staked_ratio, max_reward_rate: pos_max_inflation_rate, - last_inflation_amount: pos_last_inflation_amount, + last_inflation_amount: pos_last_inflation_amount.raw_amount(), p_gain_nom: pos_p_gain_nom, d_gain_nom: pos_d_gain_nom, epochs_per_year, @@ -689,6 +689,9 @@ where // for the previous epoch // // TODO: think about changing the reward to Decimal + let inflation = token::Amount::from_uint(inflation, 0) + .expect("Should not fail Uint -> Amount conversion"); + let mut reward_tokens_remaining = inflation; let mut new_rewards_products: HashMap = HashMap::new(); diff --git a/core/src/ledger/inflation.rs b/core/src/ledger/inflation.rs index 46e2c7ba8f..3e1902a445 100644 --- a/core/src/ledger/inflation.rs +++ b/core/src/ledger/inflation.rs @@ -3,7 +3,7 @@ //! funding. use crate::types::dec::Dec; -use crate::types::token; +use crate::types::uint::Uint; /// The domains of inflation pub enum RewardsType { @@ -19,18 +19,18 @@ pub enum RewardsType { #[allow(missing_docs)] pub struct ValsToUpdate { pub locked_ratio: Dec, - pub inflation: token::Amount, + pub inflation: Uint, } /// PD controller used to dynamically adjust the rewards rates #[derive(Debug, Clone)] pub struct RewardsController { /// Locked token amount in the relevant system - pub locked_tokens: token::Amount, + pub locked_tokens: Uint, /// Total token supply - pub total_tokens: token::Amount, + pub total_tokens: Uint, /// Total native token supply - pub total_native_tokens: token::Amount, + pub total_native_tokens: Uint, /// PD target locked ratio pub locked_ratio_target: Dec, /// PD last locked ratio @@ -38,7 +38,7 @@ pub struct RewardsController { /// Maximum reward rate pub max_reward_rate: Dec, /// Last inflation amount - pub last_inflation_amount: token::Amount, + pub last_inflation_amount: Uint, /// Nominal proportional gain pub p_gain_nom: Dec, /// Nominal derivative gain @@ -65,12 +65,12 @@ impl RewardsController { // Token amounts must be expressed in terms of the raw amount (namnam) // to 
properly run the PD controller - let locked = Dec::try_from(locked_tokens.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); - let total = Dec::try_from(total_tokens.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); - let total_native = Dec::try_from(total_native_tokens.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); + let locked = Dec::try_from(locked_tokens) + .expect("Should not fail to convert Uint to Dec"); + let total = Dec::try_from(total_tokens) + .expect("Should not fail to convert Uint to Dec"); + let total_native = Dec::try_from(total_native_tokens) + .expect("Should not fail to convert Uint to Dec"); let epochs_py: Dec = epochs_per_year.into(); let locked_ratio = locked / total; @@ -82,29 +82,20 @@ impl RewardsController { let delta_error = locked_ratio_last - locked_ratio; let control_val = p_gain * error - d_gain * delta_error; - let last_inflation_amount = - Dec::try_from(last_inflation_amount.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); + let last_inflation_amount = Dec::try_from(last_inflation_amount) + .expect("Should not fail to convert Uint to Dec"); let new_inflation_amount_raw = last_inflation_amount + control_val; let new_inflation_amount = if new_inflation_amount_raw.is_negative() { - token::Amount::zero() + Uint::zero() } else { - token::Amount::from_uint( - new_inflation_amount_raw - .to_uint() - .expect("Should not fail to convert Dec to Uint"), - 0, - ) - .expect("Should not fail to convert Uint to Amount") + new_inflation_amount_raw + .to_uint() + .expect("Should not fail to convert Dec to Uint") }; - let max_inflation = token::Amount::from_uint( - max_inflation - .to_uint() - .expect("Should not fail to convert Dec to Uint"), - 0, - ) - .expect("Should not fail to convert Uint to Amount"); + let max_inflation = max_inflation + .to_uint() + .expect("Should not fail to convert Dec to Uint"); let inflation = 
std::cmp::min(new_inflation_amount, max_inflation); ValsToUpdate { @@ -119,30 +110,17 @@ mod test { use std::str::FromStr; use super::*; - use crate::types::token::NATIVE_MAX_DECIMAL_PLACES; #[test] fn test_inflation_calc_up() { let mut controller = RewardsController { - locked_tokens: token::Amount::from_uint( - 2_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_tokens: token::Amount::from_uint( - 4_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_native_tokens: token::Amount::from_uint( - 4_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), + locked_tokens: Uint::from(2_000_000_000), + total_tokens: Uint::from(4_000_000_000_u64), + total_native_tokens: Uint::from(4_000_000_000_u64), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.5").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: token::Amount::zero(), + last_inflation_amount: Uint::zero(), p_gain_nom: Dec::from_str("0.1").unwrap(), d_gain_nom: Dec::from_str("0.1").unwrap(), epochs_per_year: 365, @@ -154,11 +132,10 @@ mod test { inflation: inflation_0, } = controller.clone().run(); println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {}", - inflation_0.to_string_native() + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}" ); assert_eq!(locked_ratio_0, Dec::from_str("0.5").unwrap()); - assert_eq!(inflation_0, token::Amount::from_uint(18_264, 0).unwrap()); + assert_eq!(inflation_0, Uint::from(18_264)); controller.locked_ratio_last = locked_ratio_0; controller.last_inflation_amount = inflation_0; @@ -170,13 +147,12 @@ mod test { inflation: inflation_1, } = controller.clone().run(); println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {}", - inflation_1.to_string_native() + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}" ); assert!(locked_ratio_1 > locked_ratio_0); assert!(locked_ratio_1 > Dec::from_str("0.5").unwrap()); assert!(locked_ratio_1 < 
Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_1, token::Amount::from_uint(36_528, 0).unwrap()); + assert_eq!(inflation_1, Uint::from(36_528)); controller.locked_ratio_last = locked_ratio_1; controller.last_inflation_amount = inflation_1; @@ -188,37 +164,24 @@ mod test { inflation: inflation_2, } = controller.run(); println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {}", - inflation_2.to_string_native() + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", ); assert!(locked_ratio_2 > locked_ratio_1); assert!(locked_ratio_2 > Dec::from_str("0.5").unwrap()); assert!(locked_ratio_2 < Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_2, token::Amount::from_uint(54_792, 0).unwrap()); + assert_eq!(inflation_2, Uint::from(54_792)); } #[test] fn test_inflation_calc_down() { let mut controller = RewardsController { - locked_tokens: token::Amount::from_uint( - 900, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_tokens: token::Amount::from_uint( - 1_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_native_tokens: token::Amount::from_uint( - 1_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), + locked_tokens: Uint::from(900_000_000), + total_tokens: Uint::from(1_000_000_000), + total_native_tokens: Uint::from(1_000_000_000), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.9").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: token::Amount::from_uint(10_000, 0).unwrap(), + last_inflation_amount: Uint::from(10_000), p_gain_nom: Dec::from_str("0.1").unwrap(), d_gain_nom: Dec::from_str("0.1").unwrap(), epochs_per_year: 365, @@ -230,11 +193,10 @@ mod test { inflation: inflation_0, } = controller.clone().run(); println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {}", - inflation_0.to_string_native() + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}", ); assert_eq!(locked_ratio_0, Dec::from_str("0.9").unwrap()); 
- assert_eq!(inflation_0, token::Amount::from_uint(3_607, 0).unwrap()); + assert_eq!(inflation_0, Uint::from(3_607)); controller.locked_ratio_last = locked_ratio_0; controller.last_inflation_amount = inflation_0; @@ -246,13 +208,12 @@ mod test { inflation: inflation_1, } = controller.clone().run(); println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {}", - inflation_1.to_string_native() + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}", ); assert!(locked_ratio_1 > locked_ratio_0); assert!(locked_ratio_1 > Dec::from_str("0.9").unwrap()); assert!(locked_ratio_1 < Dec::from_str("0.91").unwrap()); - assert_eq!(inflation_1, token::Amount::zero()); + assert_eq!(inflation_1, Uint::zero()); controller.locked_ratio_last = locked_ratio_1; controller.last_inflation_amount = inflation_1; @@ -264,10 +225,9 @@ mod test { inflation: inflation_2, } = controller.run(); println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {}", - inflation_2.to_string_native() + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", ); assert_eq!(locked_ratio_2, locked_ratio_1); - assert_eq!(inflation_2, token::Amount::zero()); + assert_eq!(inflation_2, Uint::zero()); } } diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index d9b13526fe..7a4b6b7701 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -102,13 +102,13 @@ where // Creating the PD controller for handing out tokens let controller = RewardsController { - locked_tokens: total_token_in_masp, - total_tokens, - total_native_tokens, + locked_tokens: total_token_in_masp.raw_amount(), + total_tokens: total_tokens.raw_amount(), + total_native_tokens: total_native_tokens.raw_amount(), locked_ratio_target: locked_target_ratio, locked_ratio_last: last_locked_ratio, max_reward_rate, - last_inflation_amount: last_inflation, + last_inflation_amount: last_inflation.raw_amount(), 
p_gain_nom: kp_gain_nom, d_gain_nom: kd_gain_nom, epochs_per_year, @@ -127,8 +127,7 @@ where 0u128 } else { crate::types::uint::Uint::try_into( - (inflation.raw_amount() - * crate::types::uint::Uint::from(precision)) + (inflation * crate::types::uint::Uint::from(precision)) / total_token_in_masp.raw_amount(), ) .unwrap() From 2e58240c37b0830cb312e26c83779e878e72feeb Mon Sep 17 00:00:00 2001 From: brentstone Date: Wed, 18 Oct 2023 23:40:11 -0400 Subject: [PATCH 146/161] process_slashes: fix critical bug --- proof_of_stake/src/lib.rs | 4 +--- proof_of_stake/src/tests/state_machine.rs | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 35ea602aba..65f8e83996 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -4554,9 +4554,7 @@ where // `updatedSlashedAmountMap` let validator_slashes = slashed_amount_map.entry(validator.clone()).or_default(); - for (epoch, slash) in result_slash { - *validator_slashes.entry(epoch).or_default() += slash; - } + *validator_slashes = result_slash; // `outgoingRedelegation` let outgoing_redelegations = diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index e9c4db1b3a..ce941abbbd 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -3929,9 +3929,7 @@ impl AbstractPosState { // `updatedSlashedAmountMap` let validator_slashes = val_slash_amounts.entry(validator.clone()).or_default(); - for (epoch, slash) in result_slash { - *validator_slashes.entry(epoch).or_default() += slash; - } + *validator_slashes = result_slash; let dest_validators = self .outgoing_redelegations From 05fbe1f798021978127df05c1d8ef8f744e7dacf Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 19 Oct 2023 11:53:22 +0200 Subject: [PATCH 147/161] Changelog #1999 --- .../unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md | 2 ++ 1 file changed, 2 insertions(+) create 
mode 100644 .changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md diff --git a/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md b/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md new file mode 100644 index 0000000000..73ae0da0da --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md @@ -0,0 +1,2 @@ +- Fixed the pgf stewards reward to be constant regardless of the number of + stewards. ([\#1999](https://github.com/anoma/namada/pull/1999)) \ No newline at end of file From b6b376dde85db9d6286799904f67b21f4f889f6a Mon Sep 17 00:00:00 2001 From: brentstone Date: Thu, 19 Oct 2023 11:27:52 -0400 Subject: [PATCH 148/161] fix bug in SMv1 test --- proof_of_stake/src/tests/state_machine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index ce941abbbd..6a0f4f07dd 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -996,7 +996,7 @@ impl ConcretePosState { .total_unbonded .get(&id.validator) .cloned() - .unwrap(); + .unwrap_or_default(); abs_total_unbonded.retain(|_, inner_map| { inner_map.retain(|_, value| !value.is_zero()); !inner_map.is_empty() From 7877a13d6910cc79f693949a3ad10fbddf36782f Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Fri, 20 Oct 2023 08:50:08 +0200 Subject: [PATCH 149/161] Implemented a mul_div operation for Uints and reduced overflow risks in inflation computations. 
--- core/src/ledger/storage/masp_conversions.rs | 84 ++++-- core/src/types/uint.rs | 319 +++++++++++++++++++- 2 files changed, 372 insertions(+), 31 deletions(-) diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 7a4b6b7701..69c1461e71 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -16,6 +16,7 @@ use crate::types::address::Address; use crate::types::dec::Dec; use crate::types::storage::Epoch; use crate::types::token::MaspDenom; +use crate::types::uint::Uint; use crate::types::{address, token}; /// A representation of the conversion state @@ -43,13 +44,15 @@ where D: 'static + super::DB + for<'iter> super::DBIter<'iter>, H: 'static + super::StorageHasher, { - let denomination = read_denom(wl_storage, addr).unwrap().unwrap(); + let denomination = read_denom(wl_storage, addr)? + .expect("failed to read token denomination"); // Inflation is implicitly denominated by this value. The lower this // figure, the less precise inflation computations are. This is especially // problematic when inflation is coming from a token with much higher // denomination than the native token. The higher this figure, the higher // the threshold of holdings required in order to receive non-zero rewards. - // This value should be fixed constant for each asset type. + // This value should be fixed constant for each asset type. Here we choose + // a thousandth of the given asset. 
let precision = 10u128.pow(std::cmp::max(u32::from(denomination.0), 3) - 3); let masp_addr = address::masp(); @@ -126,11 +129,21 @@ where let noterized_inflation = if total_token_in_masp.is_zero() { 0u128 } else { - crate::types::uint::Uint::try_into( - (inflation * crate::types::uint::Uint::from(precision)) - / total_token_in_masp.raw_amount(), - ) - .unwrap() + inflation + .checked_mul_div( + Uint::from(precision), + total_token_in_masp.raw_amount(), + ) + .and_then(|x| x.0.try_into().ok()) + .unwrap_or_else(|| { + tracing::warn!( + "MASP inflation for {} assumed to be 0 because the \ + computed value is too large. Please check the inflation \ + parameters.", + *addr + ); + 0u128 + }) }; tracing::debug!( @@ -159,21 +172,17 @@ where // but we should make sure the return value's ratio matches // this new inflation rate in 'update_allowed_conversions', // otherwise we will have an inaccurate view of inflation - wl_storage - .write( - &token::masp_last_inflation_key(addr), - token::Amount::from_uint( - (total_token_in_masp.raw_amount() / precision) - * crate::types::uint::Uint::from(noterized_inflation), - 0, - ) - .unwrap(), + wl_storage.write( + &token::masp_last_inflation_key(addr), + token::Amount::from_uint( + (total_token_in_masp.raw_amount() / precision) + * Uint::from(noterized_inflation), + 0, ) - .expect("unable to encode new inflation rate (Decimal)"); + .unwrap(), + )?; - wl_storage - .write(&token::masp_last_locked_ratio_key(addr), locked_ratio) - .expect("unable to encode new locked ratio (Decimal)"); + wl_storage.write(&token::masp_last_locked_ratio_key(addr), locked_ratio)?; Ok((noterized_inflation, precision)) } @@ -238,8 +247,7 @@ where let mut ref_inflation = 0; // Reward all tokens according to above reward rates for addr in &masp_reward_keys { - let reward = calculate_masp_rewards(wl_storage, addr) - .expect("Calculating the masp rewards should not fail"); + let reward = calculate_masp_rewards(wl_storage, addr)?; if *addr == native_token { // The 
reference inflation is the denominator of the native token // inflation, which is always a constant @@ -273,8 +281,21 @@ where // The amount that will be given of the new native token for // every amount of the native token given in the // previous epoch - let new_normed_inflation = *normed_inflation - + (*normed_inflation * reward.0) / reward.1; + let new_normed_inflation = Uint::from(*normed_inflation) + .checked_add( + (Uint::from(*normed_inflation) * Uint::from(reward.0)) + / reward.1, + ) + .and_then(|x| x.try_into().ok()) + .unwrap_or_else(|| { + tracing::warn!( + "MASP reward for {} assumed to be 0 because the \ + computed value is too large. Please check the \ + inflation parameters.", + *addr + ); + *normed_inflation + }); // The conversion is computed such that if consecutive // conversions are added together, the // intermediate native tokens cancel/ @@ -308,8 +329,19 @@ where // Express the inflation reward in real terms, that is, with // respect to the native asset in the zeroth // epoch - let real_reward = - (reward.0 * ref_inflation) / *normed_inflation; + let real_reward = ((Uint::from(reward.0) + * Uint::from(ref_inflation)) + / *normed_inflation) + .try_into() + .unwrap_or_else(|_| { + tracing::warn!( + "MASP reward for {} assumed to be 0 because the \ + computed value is too large. Please check the \ + inflation parameters.", + *addr + ); + 0u128 + }); // The conversion is computed such that if consecutive // conversions are added together, the // intermediate tokens cancel/ telescope out diff --git a/core/src/types/uint.rs b/core/src/types/uint.rs index ee14e67ad1..fbf1045ca9 100644 --- a/core/src/types/uint.rs +++ b/core/src/types/uint.rs @@ -21,10 +21,276 @@ pub const ZERO: Uint = Uint::from_u64(0); pub const ONE: Uint = Uint::from_u64(1); impl Uint { + const N_WORDS: usize = 4; + /// Convert a [`u64`] to a [`Uint`]. 
pub const fn from_u64(x: u64) -> Uint { Uint([x.to_le(), 0, 0, 0]) } + + /// Return the least number of bits needed to represent the number + #[inline] + pub fn bits_512(arr: &[u64; 2 * Self::N_WORDS]) -> usize { + for i in 1..arr.len() { + if arr[arr.len() - i] > 0 { + return (0x40 * (arr.len() - i + 1)) + - arr[arr.len() - i].leading_zeros() as usize; + } + } + 0x40 - arr[0].leading_zeros() as usize + } + + fn div_mod_small_512( + mut slf: [u64; 2 * Self::N_WORDS], + other: u64, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + let mut rem = 0u64; + slf.iter_mut().rev().for_each(|d| { + let (q, r) = Self::div_mod_word(rem, *d, other); + *d = q; + rem = r; + }); + (slf, rem.into()) + } + + fn shr_512( + original: [u64; 2 * Self::N_WORDS], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS] { + let shift = shift as usize; + let mut ret = [0u64; 2 * Self::N_WORDS]; + let word_shift = shift / 64; + let bit_shift = shift % 64; + + // shift + for i in word_shift..original.len() { + ret[i - word_shift] = original[i] >> bit_shift; + } + + // Carry + if bit_shift > 0 { + for i in word_shift + 1..original.len() { + ret[i - word_shift - 1] += original[i] << (64 - bit_shift); + } + } + + ret + } + + fn full_shl_512( + slf: [u64; 2 * Self::N_WORDS], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS + 1] { + debug_assert!(shift < Self::WORD_BITS as u32); + let mut u = [0u64; 2 * Self::N_WORDS + 1]; + let u_lo = slf[0] << shift; + let u_hi = Self::shr_512(slf, Self::WORD_BITS as u32 - shift); + u[0] = u_lo; + u[1..].copy_from_slice(&u_hi[..]); + u + } + + fn full_shr_512( + u: [u64; 2 * Self::N_WORDS + 1], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS] { + debug_assert!(shift < Self::WORD_BITS as u32); + let mut res = [0; 2 * Self::N_WORDS]; + for i in 0..res.len() { + res[i] = u[i] >> shift; + } + // carry + if shift > 0 { + for i in 1..=res.len() { + res[i - 1] |= u[i] << (Self::WORD_BITS as u32 - shift); + } + } + res + } + + // See Knuth, TAOCP, Volume 2, section 4.3.1, Algorithm D. 
+ fn div_mod_knuth_512( + slf: [u64; 2 * Self::N_WORDS], + mut v: Self, + n: usize, + m: usize, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + debug_assert!(Self::bits_512(&slf) >= v.bits() && !v.fits_word()); + debug_assert!(n + m <= slf.len()); + // D1. + // Make sure 64th bit in v's highest word is set. + // If we shift both self and v, it won't affect the quotient + // and the remainder will only need to be shifted back. + let shift = v.0[n - 1].leading_zeros(); + v <<= shift; + // u will store the remainder (shifted) + let mut u = Self::full_shl_512(slf, shift); + + // quotient + let mut q = [0; 2 * Self::N_WORDS]; + let v_n_1 = v.0[n - 1]; + let v_n_2 = v.0[n - 2]; + + // D2. D7. + // iterate from m downto 0 + for j in (0..=m).rev() { + let u_jn = u[j + n]; + + // D3. + // q_hat is our guess for the j-th quotient digit + // q_hat = min(b - 1, (u_{j+n} * b + u_{j+n-1}) / v_{n-1}) + // b = 1 << WORD_BITS + // Theorem B: q_hat >= q_j >= q_hat - 2 + let mut q_hat = if u_jn < v_n_1 { + let (mut q_hat, mut r_hat) = + Self::div_mod_word(u_jn, u[j + n - 1], v_n_1); + // this loop takes at most 2 iterations + loop { + // check if q_hat * v_{n-2} > b * r_hat + u_{j+n-2} + let (hi, lo) = + Self::split_u128(u128::from(q_hat) * u128::from(v_n_2)); + if (hi, lo) <= (r_hat, u[j + n - 2]) { + break; + } + // then iterate till it doesn't hold + q_hat -= 1; + let (new_r_hat, overflow) = r_hat.overflowing_add(v_n_1); + r_hat = new_r_hat; + // if r_hat overflowed, we're done + if overflow { + break; + } + } + q_hat + } else { + // here q_hat >= q_j >= q_hat - 1 + u64::max_value() + }; + + // ex. 20: + // since q_hat * v_{n-2} <= b * r_hat + u_{j+n-2}, + // either q_hat == q_j, or q_hat == q_j + 1 + + // D4. + // let's assume optimistically q_hat == q_j + // subtract (q_hat * v) from u[j..] + let q_hat_v = v.full_mul_u64(q_hat); + // u[j..] -= q_hat_v; + let c = Self::sub_slice(&mut u[j..], &q_hat_v[..n + 1]); + + // D6. + // actually, q_hat == q_j + 1 and u[j..] 
has overflowed + // highly unlikely ~ (1 / 2^63) + if c { + q_hat -= 1; + // add v to u[j..] + let c = Self::add_slice(&mut u[j..], &v.0[..n]); + u[j + n] = u[j + n].wrapping_add(u64::from(c)); + } + + // D5. + q[j] = q_hat; + } + + // D8. + let remainder = Self::full_shr_512(u, shift); + // The remainder should never exceed the capacity of Self + debug_assert!( + Self::bits_512(&remainder) <= Self::N_WORDS * Self::WORD_BITS + ); + (q, Self(remainder[..Self::N_WORDS].try_into().unwrap())) + } + + /// Returns a pair `(self / other, self % other)`. + /// + /// # Panics + /// + /// Panics if `other` is zero. + pub fn div_mod_512( + slf: [u64; 2 * Self::N_WORDS], + other: Self, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + let my_bits = Self::bits_512(&slf); + let your_bits = other.bits(); + + assert!(your_bits != 0, "division by zero"); + + // Early return in case we are dividing by a larger number than us + if my_bits < your_bits { + return ( + [0; 2 * Self::N_WORDS], + Self(slf[..Self::N_WORDS].try_into().unwrap()), + ); + } + + if your_bits <= Self::WORD_BITS { + return Self::div_mod_small_512(slf, other.low_u64()); + } + + let (n, m) = { + let my_words = Self::words(my_bits); + let your_words = Self::words(your_bits); + (your_words, my_words - your_words) + }; + + Self::div_mod_knuth_512(slf, other, n, m) + } + + /// Returns a pair `(Some((self * num) / denom), (self * num) % denom)` if + /// the quotient fits into Self. Otherwise `(None, (self * num) % denom)` is + /// returned. + /// + /// # Panics + /// + /// Panics if `denom` is zero. + pub fn checked_mul_div( + &self, + num: Self, + denom: Self, + ) -> Option<(Self, Self)> { + if denom.is_zero() { + None + } else { + let prod = uint::uint_full_mul_reg!(Uint, 4, self, num); + let (quotient, remainder) = Self::div_mod_512(prod, denom); + // The compiler WILL NOT inline this if you remove this annotation. + #[inline(always)] + fn any_nonzero(arr: &[u64]) -> bool { + use uint::unroll; + unroll! 
{ + for i in 0..4 { + if arr[i] != 0 { + return true; + } + } + } + + false + } + if any_nonzero("ient[Self::N_WORDS..]) { + None + } else { + Some(( + Self(quotient[0..Self::N_WORDS].try_into().unwrap()), + remainder, + )) + } + } + } + + /// Returns a pair `((self * num) / denom, (self * num) % denom)`. + /// + /// # Panics + /// + /// Panics if `denom` is zero. + pub fn mul_div(&self, num: Self, denom: Self) -> (Self, Self) { + let prod = uint::uint_full_mul_reg!(Uint, 4, self, num); + let (quotient, remainder) = Self::div_mod_512(prod, denom); + ( + Self(quotient[0..Self::N_WORDS].try_into().unwrap()), + remainder, + ) + } } construct_uint! { @@ -171,10 +437,10 @@ impl Uint { /// * `self` * 10^(`denom`) overflows 256 bits /// * `other` is zero (`checked_div` will return `None`). pub fn fixed_precision_div(&self, rhs: &Self, denom: u8) -> Option { - let lhs = Uint::from(10) + Uint::from(10) .checked_pow(Uint::from(denom)) - .and_then(|res| res.checked_mul(*self))?; - lhs.checked_div(*rhs) + .and_then(|res| res.checked_mul_div(*self, *rhs)) + .map(|x| x.0) } /// Compute the two's complement of a number. 
@@ -554,6 +820,8 @@ impl TryFrom for i128 { #[cfg(test)] mod test_uint { + use std::str::FromStr; + use super::*; /// Test that dividing two [`Uint`]s with the specified precision @@ -581,8 +849,14 @@ mod test_uint { two.fixed_precision_div(&three, 3).expect("Satan lives"), Uint::from(666) ); - assert!(two.fixed_precision_div(&three, 77).is_none()); - assert!(Uint::from(20).fixed_precision_div(&three, 76).is_none()); + assert_eq!( + two.fixed_precision_div(&three, 77).expect("Test failed"), + Uint::from_str("9363ff047551e60c314a09cf62a269d471bafcf44a8c6aaaaaaaaaaaaaaaaaaa").unwrap() + ); + assert_eq!( + Uint::from(20).fixed_precision_div(&three, 76).expect("Test failed"), + Uint::from_str("9363ff047551e60c314a09cf62a269d471bafcf44a8c6aaaaaaaaaaaaaaaaaaa").unwrap() + ); } /// Test that adding one to the max signed @@ -710,4 +984,39 @@ mod test_uint { let amount: Result = serde_json::from_str(r#""1000000000.2""#); assert!(amount.is_err()); } + + #[test] + fn test_mul_div() { + use std::str::FromStr; + let a: Uint = Uint::from_str( + "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ).unwrap(); + let b: Uint = Uint::from_str( + "0x8000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + let c: Uint = Uint::from_str( + "0x4000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + let d: Uint = Uint::from_str( + "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ).unwrap(); + let e: Uint = Uint::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ).unwrap(); + let f: Uint = Uint::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + assert_eq!(a.mul_div(a, a), (a, Uint::zero())); + assert_eq!(b.mul_div(c, b), (c, Uint::zero())); + assert_eq!(a.mul_div(c, b), (d, c)); + assert_eq!(a.mul_div(e, e), (a, Uint::zero())); + assert_eq!(e.mul_div(c, b), (Uint::zero(), c)); + assert_eq!(f.mul_div(a, e), 
(Uint::zero(), Uint::zero())); + assert_eq!(a.checked_mul_div(a, a), Some((a, Uint::zero()))); + assert_eq!(b.checked_mul_div(c, b), Some((c, Uint::zero()))); + assert_eq!(a.checked_mul_div(c, b), Some((d, c))); + assert_eq!(a.checked_mul_div(e, e), Some((a, Uint::zero()))); + assert_eq!(e.checked_mul_div(c, b), Some((Uint::zero(), c))); + assert_eq!(d.checked_mul_div(a, e), None); + } } From 2e3e64069c56a8ae5d2e65e4fddd8dcc35be5791 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Fri, 20 Oct 2023 12:08:58 +0200 Subject: [PATCH 150/161] Added changelog entry. --- .changelog/unreleased/improvements/1985-compounding-rewards.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1985-compounding-rewards.md diff --git a/.changelog/unreleased/improvements/1985-compounding-rewards.md b/.changelog/unreleased/improvements/1985-compounding-rewards.md new file mode 100644 index 0000000000..1b41c6ca20 --- /dev/null +++ b/.changelog/unreleased/improvements/1985-compounding-rewards.md @@ -0,0 +1,2 @@ +- MASP rewards are now distributed in the manner dictated by the PD-controller + ([\#1985](https://github.com/anoma/namada/pull/1985)) \ No newline at end of file From cf1db8193c71f92b1b296aa43bf39da52fbc658a Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 20 Oct 2023 15:20:23 +0200 Subject: [PATCH 151/161] evil: client from context in rpc --- apps/src/lib/client/rpc.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 80f724e56d..e6ddbff2e9 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -1121,23 +1121,23 @@ pub async fn query_proposal_result<'a>( governance_storage::get_proposal_result_key(proposal_id); let proposal_result = // Try to directly query the result in storage first - match query_storage_value(client, &proposal_result_key).await { + match query_storage_value(context.client(), 
&proposal_result_key).await { Ok(result) => result, Err(_) => { // If failure, run the tally - let is_author_steward = query_pgf_stewards(client) + let is_author_steward = query_pgf_stewards(context.client()) .await .iter() .any(|steward| steward.address.eq(&proposal.author)); let tally_type = proposal.get_tally_type(is_author_steward); let total_voting_power = get_total_staked_tokens( - client, + context.client(), proposal.voting_end_epoch, ) .await; let votes = compute_proposal_votes( - client, + context.client(), proposal_id, proposal.voting_end_epoch, ) From 454b3f6757857329c3c9d9e8e04f634fcdadff12 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 20 Oct 2023 16:04:18 +0200 Subject: [PATCH 152/161] evil: fixes missing test import and `dry_run` parameter --- .../lib/node/ledger/shell/finalize_block.rs | 260 ++++++++++-------- apps/src/lib/node/ledger/shell/mod.rs | 1 + shared/src/ledger/mod.rs | 1 + 3 files changed, 154 insertions(+), 108 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index baa6075be5..61fa9ed34f 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2054,9 +2054,11 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. 
Make val2 the next block's @@ -2066,9 +2068,11 @@ mod test_finalize_block { assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!(!rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal let acc_sum = get_rewards_sum(&shell.wl_storage); @@ -2182,9 +2186,11 @@ mod test_finalize_block { None, ); } - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); let rp1 = rewards_prod_1 .get(&shell.wl_storage, &Epoch::default()) .unwrap() @@ -2269,21 +2275,25 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!(shell - .shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_tx.header_hash()) - .unwrap_or_default()); + assert!( + shell + .shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_tx.header_hash()) + .unwrap_or_default() + ); // Check that the hash is present in the merkle tree - assert!(!shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&decrypted_hash_key) - .unwrap()); + assert!( + !shell + .shell + .wl_storage + .storage + .block + .tree + .has_key(&decrypted_hash_key) + .unwrap() + ); } /// Test replay protection hash handling @@ -2418,36 +2428,48 @@ mod test_finalize_block { .as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); - assert!(shell - .wl_storage - .write_log - .has_replay_protection_entry(&invalid_wrapper_hash) - .unwrap_or_default()); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_3_hash) - 
.unwrap_or_default()); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_hash) - .unwrap_or_default()); - assert!(shell - .wl_storage - .write_log - .has_replay_protection_entry(&wrapper_hash) - .unwrap_or_default()); - assert!(shell - .wl_storage - .storage - .has_replay_protection_entry(&decrypted_2_hash) - .expect("test failed")); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&wrapper_2_hash) - .unwrap_or_default()); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&invalid_wrapper_hash) + .unwrap_or_default() + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_3_hash) + .unwrap_or_default() + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_hash) + .unwrap_or_default() + ); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_hash) + .unwrap_or_default() + ); + assert!( + shell + .wl_storage + .storage + .has_replay_protection_entry(&decrypted_2_hash) + .expect("test failed") + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_2_hash) + .unwrap_or_default() + ); } // Test that if the fee payer doesn't have enough funds for fee payment the @@ -2735,9 +2757,11 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.wl_storage)? + ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, Epoch::default()) .unwrap(), @@ -2756,17 +2780,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.wl_storage)? 
+ ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.wl_storage)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.wl_storage)? + ); // Advance to the processing epoch loop { @@ -2789,9 +2817,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&shell.wl_storage.storage.block.epoch) + .is_empty(&shell.wl_storage)? + ); let stake1 = read_validator_stake( &shell.wl_storage, ¶ms, @@ -3350,13 +3380,15 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.wl_storage, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); assert!( namada_proof_of_stake::validator_slashes_handle(&val1.address) .is_empty(&shell.wl_storage) @@ -3865,14 +3897,12 @@ mod test_finalize_block { assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); // assert!(!below_cap_val_set.at(&ep).is_empty(storage). 
// unwrap()); - assert!(!validator_positions - .at(&ep) - .is_empty(storage) - .unwrap()); - assert!(!all_validator_addresses - .at(&ep) - .is_empty(storage) - .unwrap()); + assert!( + !validator_positions.at(&ep).is_empty(storage).unwrap() + ); + assert!( + !all_validator_addresses.at(&ep).is_empty(storage).unwrap() + ); } }; @@ -3911,25 +3941,33 @@ mod test_finalize_block { Epoch(1), Epoch(params.pipeline_len + default_past_epochs + 1), ); - assert!(!consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); - assert!(validator_positions - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); - assert!(all_validator_addresses - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + validator_positions + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + all_validator_addresses + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); // Advance to the epoch `consensus_val_set_len` + 1 loop { - assert!(!consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); let votes = get_default_true_votes( &shell.wl_storage, shell.wl_storage.storage.block.epoch, @@ -3940,10 +3978,12 @@ mod test_finalize_block { } } - assert!(consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); // Advance one more epoch let votes = get_default_true_votes( @@ -3952,19 +3992,23 @@ mod test_finalize_block { ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); for ep in Epoch::default().iter_range(2) { - assert!(consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); } 
for ep in Epoch::iter_bounds_inclusive( Epoch(2), current_epoch + params.pipeline_len, ) { - assert!(!consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); } Ok(()) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 01f7c14fcf..ddb47acbfd 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -2130,6 +2130,7 @@ mod test_utils { #[cfg(test)] mod shell_tests { + use namada::core::ledger::replay_protection; use namada::proto::{ Code, Data, Section, SignableEthMessage, Signature, Signed, Tx, }; diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 831aeab536..62e460f099 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -59,6 +59,7 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit.to_owned()); protocol::apply_wrapper_tx( + tx.clone(), &wrapper, None, &request.data, From 44633e28af359e1b87863b9fa7fb81e7702ec4b0 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 30 Aug 2023 20:09:51 +0200 Subject: [PATCH 153/161] Removes redundant signature on `Code` and `Data` --- .../lib/node/ledger/shell/finalize_block.rs | 143 +++++++------ apps/src/lib/node/ledger/shell/mod.rs | 5 +- .../lib/node/ledger/shell/prepare_proposal.rs | 2 +- .../lib/node/ledger/shell/process_proposal.rs | 28 +-- benches/lib.rs | 4 +- core/src/proto/types.rs | 33 +-- core/src/types/transaction/mod.rs | 2 +- core/src/types/transaction/protocol.rs | 6 +- shared/src/ledger/native_vp/ibc/mod.rs | 151 ++++++-------- shared/src/vm/host_env.rs | 2 +- tests/src/vm_host_env/mod.rs | 12 +- vp_prelude/src/lib.rs | 10 +- wasm/checksums.json | 40 ++-- wasm/wasm_source/src/vp_implicit.rs | 62 ++---- wasm/wasm_source/src/vp_testnet_faucet.rs | 6 +- wasm/wasm_source/src/vp_user.rs | 196 ++++++++---------- wasm/wasm_source/src/vp_validator.rs | 100 ++++----- 17 files 
changed, 353 insertions(+), 449 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 14c24addce..fa329ae681 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2074,9 +2074,11 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. Make val2 the next block's @@ -2086,9 +2088,11 @@ mod test_finalize_block { assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!(!rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal let acc_sum = get_rewards_sum(&shell.wl_storage); @@ -2202,9 +2206,11 @@ mod test_finalize_block { None, ); } - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); let rp1 = rewards_prod_1 .get(&shell.wl_storage, &Epoch::default()) .unwrap() @@ -2259,11 +2265,8 @@ mod test_finalize_block { let wrapper_hash_key = replay_protection::get_replay_protection_key( &wrapper_tx.header_hash(), ); - let mut decrypted_tx = wrapper_tx; - - decrypted_tx.update_header(TxType::Raw); let decrypted_hash_key = 
replay_protection::get_replay_protection_key( - &decrypted_tx.header_hash(), + &wrapper_tx.raw_header_hash(), ); // merkle tree root before finalize_block @@ -2294,22 +2297,26 @@ mod test_finalize_block { assert!(shell.shell.wl_storage.has_key(&wrapper_hash_key).unwrap()); assert!(shell.shell.wl_storage.has_key(&decrypted_hash_key).unwrap()); // Check that non of the hashes is present in the merkle tree - assert!(!shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap()); - assert!(!shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&decrypted_hash_key) - .unwrap()); + assert!( + !shell + .shell + .wl_storage + .storage + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap() + ); + assert!( + !shell + .shell + .wl_storage + .storage + .block + .tree + .has_key(&decrypted_hash_key) + .unwrap() + ); } /// Test that if a decrypted transaction fails because of out-of-gas, its @@ -2380,10 +2387,12 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); - assert!(!shell - .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed")) + assert!( + !shell + .wl_storage + .has_key(&inner_hash_key) + .expect("Test failed") + ) } #[test] @@ -2444,14 +2453,18 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); - assert!(shell - .wl_storage - .has_key(&wrapper_hash_key) - .expect("Test failed")); - assert!(!shell - .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed")) + assert!( + shell + .wl_storage + .has_key(&wrapper_hash_key) + .expect("Test failed") + ); + assert!( + !shell + .wl_storage + .has_key(&inner_hash_key) + .expect("Test failed") + ) } // Test that if the fee payer doesn't have enough funds for fee payment the @@ -2738,9 +2751,11 @@ mod test_finalize_block { 
.unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.wl_storage)? + ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, Epoch::default()) .unwrap(), @@ -2759,17 +2774,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.wl_storage)? + ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.wl_storage)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.wl_storage)? + ); // Advance to the processing epoch loop { @@ -2792,9 +2811,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&shell.wl_storage.storage.block.epoch) + .is_empty(&shell.wl_storage)? 
+ ); let stake1 = read_validator_stake( &shell.wl_storage, ¶ms, @@ -3340,13 +3361,15 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.wl_storage, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); assert!( namada_proof_of_stake::validator_slashes_handle(&val1.address) .is_empty(&shell.wl_storage) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 5c393ad85c..bfaf513530 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -922,7 +922,6 @@ where pub fn replay_protection_checks( &self, wrapper: &Tx, - tx_bytes: &[u8], temp_wl_storage: &mut TempWlStorage, ) -> Result<()> { let inner_tx_hash = wrapper.raw_header_hash(); @@ -944,9 +943,7 @@ where .write(&inner_hash_key, vec![]) .expect("Couldn't write inner transaction hash to write log"); - let tx = - Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); - let wrapper_hash = tx.header_hash(); + let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = replay_protection::get_replay_protection_key(&wrapper_hash); if temp_wl_storage diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 10f9cbc6c1..986bf32bbe 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -246,7 +246,7 @@ where tx_gas_meter.add_tx_size_gas(tx_bytes).map_err(|_| ())?; // Check replay protection - self.replay_protection_checks(&tx, tx_bytes, temp_wl_storage) + self.replay_protection_checks(&tx, temp_wl_storage) .map_err(|_| ())?; // Check fees diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 
e79d3dba42..3b75de9948 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -842,11 +842,9 @@ where } } else { // Replay protection checks - if let Err(e) = self.replay_protection_checks( - &tx, - tx_bytes, - temp_wl_storage, - ) { + if let Err(e) = + self.replay_protection_checks(&tx, temp_wl_storage) + { return TxResult { code: ErrorCodes::ReplayTx.into(), info: e.to_string(), @@ -1088,11 +1086,13 @@ mod test_process_proposal { shell.chain_id.clone(), ) .to_bytes(); - assert!(shell - .process_proposal(ProcessProposal { - txs: vec![tx.clone(), tx] - }) - .is_err()); + assert!( + shell + .process_proposal(ProcessProposal { + txs: vec![tx.clone(), tx] + }) + .is_err() + ); } #[cfg(feature = "abcipp")] @@ -1249,9 +1249,11 @@ mod test_process_proposal { sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); - let mut txs = vec![EthereumTxData::BridgePool(vote_ext.into()) - .sign(protocol_key, shell.chain_id.clone()) - .to_bytes()]; + let mut txs = vec![ + EthereumTxData::BridgePool(vote_ext.into()) + .sign(protocol_key, shell.chain_id.clone()) + .to_bytes(), + ]; let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), diff --git a/benches/lib.rs b/benches/lib.rs index 47645abdf4..91e65ec6ff 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -452,7 +452,7 @@ pub fn generate_tx( if let Some(signer) = signer { tx.add_section(Section::Signature(Signature::new( - tx.sechashes(), + vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, ))); @@ -495,7 +495,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { .unwrap(), )); tx.add_section(Section::Signature(Signature::new( - tx.sechashes(), + vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, ))); diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 5bd16d61cb..98839ccf98 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -1250,7 
+1250,7 @@ impl Tx { Section::Header(self.header.clone()).get_hash() } - /// Gets the hash of the raw transaction's header + /// Gets the hash of the decrypted transaction's header pub fn raw_header_hash(&self) -> crate::types::hash::Hash { let mut raw_header = self.header(); raw_header.tx_type = TxType::Raw; @@ -1280,6 +1280,10 @@ impl Tx { ) -> Option> { if self.header_hash() == *hash { return Some(Cow::Owned(Section::Header(self.header.clone()))); + } else if self.raw_header_hash() == *hash { + let mut header = self.header(); + header.tx_type = TxType::Raw; + return Some(Cow::Owned(Section::Header(header))); } for section in &self.sections { if section.get_hash() == *hash { @@ -1366,20 +1370,6 @@ impl Tx { bytes } - /// Get the inner section hashes - pub fn inner_section_targets(&self) -> Vec { - let mut sections_hashes = self - .sections - .iter() - .filter_map(|section| match section { - Section::Data(_) | Section::Code(_) => Some(section.get_hash()), - _ => None, - }) - .collect::>(); - sections_hashes.sort(); - sections_hashes - } - /// Verify that the section with the given hash has been signed by the given /// public key pub fn verify_signatures( @@ -1494,8 +1484,7 @@ impl Tx { public_keys_index_map: &AccountPublicKeysMap, signer: Option

, ) -> Vec { - let mut targets = vec![self.header_hash()]; - targets.extend(self.inner_section_targets()); + let targets = vec![self.raw_header_hash()]; let mut signatures = Vec::new(); let section = Signature::new( targets, @@ -1773,11 +1762,9 @@ impl Tx { account_public_keys_map: AccountPublicKeysMap, signer: Option
, ) -> &mut Self { - // The inner tx signer signs the Raw version of the Header - let mut hashes = vec![self.raw_header_hash()]; + // The inner tx signer signs the Decrypted version of the Header + let hashes = vec![self.raw_header_hash()]; self.protocol_filter(); - let sections_hashes = self.inner_section_targets(); - hashes.extend(sections_hashes); self.add_section(Section::Signature(Signature::new( hashes, @@ -1794,7 +1781,7 @@ impl Tx { ) -> &mut Self { self.protocol_filter(); let mut pk_section = Signature { - targets: self.inner_section_targets(), + targets: vec![self.raw_header_hash()], signatures: BTreeMap::new(), signer: Signer::PubKeys(vec![]), }; @@ -1805,7 +1792,7 @@ impl Tx { // Add the signature under the given multisig address let section = sections.entry(addr.clone()).or_insert_with(|| Signature { - targets: self.inner_section_targets(), + targets: vec![self.raw_header_hash()], signatures: BTreeMap::new(), signer: Signer::Address(addr.clone()), }); diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 0102a228eb..419611e01e 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -234,7 +234,7 @@ mod test_process_tx { .set_data(Data::new("transaction data".as_bytes().to_owned())) .clone(); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.code_sechash(), *tx.data_sechash()], + vec![tx.raw_header_hash()], [(0, gen_keypair())].into_iter().collect(), None, ))); diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index 1a51434b29..7770df1fed 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -335,11 +335,7 @@ mod protocol_txs { .expect("Serializing request should not fail"), )); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, 
signing_key.clone())].into_iter().collect(), None, ))); diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 94fca6063e..91b506fc0a 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -244,8 +244,8 @@ pub fn get_dummy_header() -> crate::types::storage::Header { /// A dummy validator used for testing #[cfg(any(test, feature = "testing"))] -pub fn get_dummy_genesis_validator( -) -> namada_proof_of_stake::types::GenesisValidator { +pub fn get_dummy_genesis_validator() +-> namada_proof_of_stake::types::GenesisValidator { use crate::core::types::address::testing::established_address_1; use crate::core::types::dec::Dec; use crate::core::types::key::testing::common_sk_from_simple_seed; @@ -724,11 +724,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -746,9 +742,10 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -953,9 +950,10 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1039,11 +1037,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - 
*outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1067,9 +1061,10 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1289,9 +1284,10 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1375,11 +1371,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1402,9 +1394,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1466,11 +1459,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1493,9 +1482,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, 
&keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1594,11 +1584,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1621,9 +1607,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1721,11 +1708,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1748,9 +1731,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1833,11 +1817,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1860,9 +1840,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -1963,9 
+1944,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } // skip test_close_init_channel() and test_close_confirm_channel() since it @@ -2104,9 +2086,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -2291,9 +2274,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -2437,9 +2421,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -2587,9 +2572,10 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } #[test] @@ -2738,8 +2724,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx(&tx, &keys_changed, &verifiers) + .expect("validation failed") + ); } } diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 3e3b78e31b..338bc74a2f 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1809,7 +1809,7 @@ where let gas_meter = unsafe { env.ctx.gas_meter.get() }; vp_host_fns::add_gas(gas_meter, gas)?; - 
let hashes = <[Hash; 3]>::try_from_slice(&hash_list) + let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let (public_keys_map, gas) = env diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index a5ef2d54f1..3a2ee2e786 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -475,11 +475,7 @@ mod tests { assert!( signed_tx_data .verify_signatures( - &[ - signed_tx_data.header_hash(), - *signed_tx_data.data_sechash(), - *signed_tx_data.code_sechash(), - ], + &[signed_tx_data.header_hash(),], pks_map, &None, 1, @@ -495,11 +491,7 @@ mod tests { assert!( signed_tx_data .verify_signatures( - &[ - signed_tx_data.header_hash(), - *signed_tx_data.data_sechash(), - *signed_tx_data.code_sechash(), - ], + &[signed_tx_data.header_hash(),], AccountPublicKeysMap::from_iter([ other_keypair.ref_to() ]), diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index c42a29864a..220dab1daf 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -30,7 +30,6 @@ use namada_core::types::internal::HostEnvResult; use namada_core::types::storage::{ BlockHash, BlockHeight, Epoch, Header, TxIndex, BLOCK_HASH_LENGTH, }; -use namada_core::types::transaction::TxType; pub use namada_core::types::*; pub use namada_macros::validity_predicate; pub use namada_proof_of_stake::storage as proof_of_stake; @@ -89,17 +88,10 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let threshold = storage_api::account::threshold(&ctx.pre(), owner)?.unwrap_or(1); - // FIXME: add a test to check the invalid signature in vp of the tx header - // hash FIXME: tryo a replay attack on a local devnet - let mut header = tx.header(); - header.tx_type = TxType::Raw; - let targets = - [tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()]; - // Serialize parameters let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); let public_keys_map = 
public_keys_index_map.try_to_vec().unwrap(); - let targets = targets.try_to_vec().unwrap(); + let targets = [tx.raw_header_hash()].try_to_vec().unwrap(); let signer = owner.try_to_vec().unwrap(); let valid = unsafe { diff --git a/wasm/checksums.json b/wasm/checksums.json index 614ab78e6b..de46655847 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,22 +1,22 @@ { - "tx_bond.wasm": "tx_bond.b322054eef9d45e299384b2a363049ce0b0160a0c4781ca357aa59970904726c.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.6f6ad3b95e21072af9e854e374fa0d7f691f0743da8cf52a643ed1bdb0e16611.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.9310e0a0b7c14fc7c2427040da8c91eb4067babfaaea9e3b646edbfdd09c8069.wasm", - "tx_ibc.wasm": "tx_ibc.54313469bcc9bcaabf661177f88cb90ac9008f542edbf686f286a02f8cdbfd41.wasm", - "tx_init_account.wasm": "tx_init_account.10ee01dac5325685360119ba8e4b597d776a018ea4c9ac3534dd876ec377789e.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.04cad5a3a71f833a5867bca3ced54b06d34ad07f3f21877599d38581d362ba10.wasm", - "tx_init_validator.wasm": "tx_init_validator.16d53a09e5df06400849aaa161c35e4e377284692f73a71dcbd4573656da7f64.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.b5d92c1bd196be0d196ef16e2ceed9a9ced7ac61d7b177fdbad208c0e784e172.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.32011ddc5316705ae005059d5916b071288a04fb4dee80854af16d61548b5c27.wasm", - "tx_transfer.wasm": "tx_transfer.963ec4c2705377423ddc46b4ff3de63f9b625351467d89290fa771a485710c41.wasm", - "tx_unbond.wasm": "tx_unbond.7f26336db8e8cfebc04d301dc4790138fdd9bc22878fe7542c3da525a09576be.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.15a7a399d8fb79f8df959d0ddf4c193020886d1caab1e094cca10ea3aff44a72.wasm", - "tx_update_account.wasm": "tx_update_account.7b4e225a823449d3d8bffde197c439ad24f4f6c95cf754acf62b6373958c4486.wasm", - "tx_update_steward_commission.wasm": 
"tx_update_steward_commission.0001b21ef3ef4f9b33afb5a5ef75a6a5427fbe221a8350cfbd81781ac18ded6e.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.727e36112fcd0753f758370dff981cc93430fe7d6f95ceb570a02a37529a7531.wasm", - "tx_withdraw.wasm": "tx_withdraw.e70485a8b79c5bff17d3b6ea96a7546cb709137c8a64606bdd1e77637157de33.wasm", - "vp_implicit.wasm": "vp_implicit.e0958c2ec06863f7bd48cd9abb67cc7557f956ce9fa6c714deba885db721fa50.wasm", - "vp_masp.wasm": "vp_masp.037671b60b3e9f312c1c5fdc53d040ebfad21a646b9b1e2dac6b3e20fc0d01ec.wasm", - "vp_user.wasm": "vp_user.0203fddde57bc31ef411370b628963486928a7c4d34614980d1a52616e0f617b.wasm", - "vp_validator.wasm": "vp_validator.39c685bc1407ef484f963aff9f7576273d56bbf283dcbded9f01944cf7ff9bf0.wasm" + "tx_bond.wasm": "tx_bond.c9fde5719da6dd63a79e0da4a099717401fbb8b7a618b3cb0778015a0773ef23.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.e07070f6127f18e47fd4e43ef60f78344c0724f400ed69e8f30af31ad1bfd3cb.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.34f0e93ee7e55410e75345c8077299f0cc4aa00aa807b09e0ff380f2709a51c8.wasm", + "tx_ibc.wasm": "tx_ibc.ab49f6c6164e4016b9662405ad8da2ec45d876c1e2d0b756a33a75a9e2f45d38.wasm", + "tx_init_account.wasm": "tx_init_account.926d20201221e325c687a39fe9d338f01fb770f6397efd3882b99493089c2ce4.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.6a95f3f1f6ceeeb57335e122dc2ce6a99cafdaef095f00f3439da63881d73e1a.wasm", + "tx_init_validator.wasm": "tx_init_validator.445646b22db97882b3bbdc82a1b276b2bc8f0e18e04c0ba46c096fd5c729d593.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.7f6810fd9901093b044d1f759a3fec6faef26fac1501d1a0c22f7c36e5b90fb4.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.cf7811df8c17d38faee925fa77996e8e58abf89b3d4e196972be8b99007b682a.wasm", + "tx_transfer.wasm": "tx_transfer.041e11a019e88466328a2508c1754ea49c16fe8d009ffa15fa9e4a8190e8e0d5.wasm", + "tx_unbond.wasm": "tx_unbond.0778900cdb631687121ee4fbf1cc95cead67b337f5e5e0a92ad6c22c14c3ebd9.wasm", + 
"tx_unjail_validator.wasm": "tx_unjail_validator.616b6874bbc91a7dc8751a34cd512050695622b5c4b2eeb82ea533b5184089d6.wasm", + "tx_update_account.wasm": "tx_update_account.c39ff535e6b67fa65e902352795a8573aeae77ea6158e93492cecfefa7f3c475.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.d3ae5fca19609aa2cfa126bbf7926e6ba42b0e78570d8adb38e5f9ec786153e6.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.5d6493da13f1a815fe353f9d46b130b2ff779cd59bb7d89b34bb98cb4e271b3a.wasm", + "tx_withdraw.wasm": "tx_withdraw.df045a91abda536abbf7e68fee98cddea6d7faf0a0f0c8f8ecd6ec35b8dcead8.wasm", + "vp_implicit.wasm": "vp_implicit.fd0c536e007782a3b8d3672e9db119725872607a56611e748f185147ac4b3569.wasm", + "vp_masp.wasm": "vp_masp.856241eb315b01531ec3143eec72720b9608616a6f7bb4109c1f818f42c140dd.wasm", + "vp_user.wasm": "vp_user.131360a9656267034d87eaa391390be5d7007c1ffc3f88626e4e407b233c1d18.wasm", + "vp_validator.wasm": "vp_validator.952dcbb21bb2d0cd285e1d75b08174e29ab749ae927d78b130fb6b91bcdfe200.wasm" } \ No newline at end of file diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 3c6ec99c26..93dfb82fef 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -536,7 +536,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -547,14 +547,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a debit transfer without a valid signature is rejected. @@ -676,7 +672,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -688,14 +684,10 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a transfer on with accounts other than self is accepted. @@ -764,8 +756,8 @@ mod tests { /// Generates a keypair, derive an implicit address from it and generate /// a storage key inside its storage. - fn arb_account_storage_subspace_key( - ) -> impl Strategy { + fn arb_account_storage_subspace_key() + -> impl Strategy { // Generate a keypair key::testing::arb_common_keypair().prop_flat_map(|sk| { let pk = sk.ref_to(); @@ -848,7 +840,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -941,7 +933,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -951,14 +943,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } #[test] @@ -1000,7 +988,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -1010,13 +998,9 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 091af51372..598b7a77ef 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -267,7 +267,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -404,7 +404,7 @@ mod tests { tx_data.set_data(Data::new(solution_bytes)); tx_data.set_code(Code::new(vec![])); tx_data.add_section(Section::Signature(Signature::new( - vec![tx_data.raw_header_hash(), *tx_data.data_sechash(), *tx_data.code_sechash()], + vec![tx_data.raw_header_hash()], [(0, target_key)].into_iter().collect(), None, ))); @@ -458,7 +458,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index 5bb0593563..63d87aff10 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -393,7 +393,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -403,14 +403,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: 
BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a PoS action that must be authorized is rejected without a @@ -566,7 +562,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -576,14 +572,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a transfer on with accounts other than self is accepted. @@ -695,56 +687,56 @@ mod tests { } proptest! { - /// Test that a signed tx that performs arbitrary storage writes or - /// deletes to the account is accepted. - #[test] - fn test_signed_arb_storage_write( - (vp_owner, storage_key) in arb_account_storage_subspace_key(), - // Generate bytes to write. If `None`, delete from the key instead - storage_value in any::>>(), - ) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let keypair = key::testing::keypair_1(); - let public_key = keypair.ref_to(); - - // Spawn all the accounts in the storage key to be able to modify - // their storage - let storage_key_addresses = storage_key.find_addresses(); - tx_env.spawn_accounts(storage_key_addresses); - tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { - // Write or delete some data in the transaction - if let Some(value) = &storage_value { - tx::ctx().write(&storage_key, value).unwrap(); - } else { - tx::ctx().delete(&storage_key).unwrap(); - } - }); - - let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); - - let mut vp_env = vp_host_env::take(); - let mut tx = vp_env.tx.clone(); - tx.set_code(Code::new(vec![])); - tx.set_data(Data::new(vec![])); - tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], - pks_map.index_secret_keys(vec![keypair]), - None, - ))); - let signed_tx = tx.clone(); - vp_env.tx = signed_tx.clone(); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); + /// Test that a signed tx that performs arbitrary storage writes or + /// deletes to the account is accepted. + #[test] + fn test_signed_arb_storage_write( + (vp_owner, storage_key) in arb_account_storage_subspace_key(), + // Generate bytes to write. If `None`, delete from the key instead + storage_value in any::>>(), + ) { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + + let keypair = key::testing::keypair_1(); + let public_key = keypair.ref_to(); + + // Spawn all the accounts in the storage key to be able to modify + // their storage + let storage_key_addresses = storage_key.find_addresses(); + tx_env.spawn_accounts(storage_key_addresses); + tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { + // Write or delete some data in the transaction + if let Some(value) = &storage_value { + tx::ctx().write(&storage_key, value).unwrap(); + } else { + tx::ctx().delete(&storage_key).unwrap(); + } + }); + + let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); + + let mut vp_env = vp_host_env::take(); + let mut tx = vp_env.tx.clone(); + tx.set_code(Code::new(vec![])); + tx.set_data(Data::new(vec![])); + tx.add_section(Section::Signature(Signature::new( + vec![ tx.raw_header_hash()], + pks_map.index_secret_keys(vec![keypair]), + None, + ))); + let signed_tx = tx.clone(); + vp_env.tx = signed_tx.clone(); + let keys_changed: BTreeSet = + vp_env.all_touched_storage_keys(); + let verifiers: BTreeSet
= BTreeSet::default(); + vp_host_env::set(vp_env); + assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); + } } - } /// Test that a validity predicate update without a valid signature is /// rejected. @@ -819,7 +811,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -829,14 +821,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is rejected if not whitelisted @@ -878,7 +866,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -888,14 +876,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is accepted if whitelisted @@ -938,7 +922,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -948,14 +932,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a tx is rejected if not whitelisted @@ -998,7 +978,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1008,14 +988,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } #[test] @@ -1058,7 +1034,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1068,13 +1044,9 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } } diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index 29e0c6f3d2..1c306099c1 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -400,7 +400,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -410,14 +410,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a PoS action that must be authorized is rejected without a @@ -584,7 +580,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -594,14 +590,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a transfer on with accounts other than self is accepted. @@ -750,7 +742,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -836,7 +828,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -846,14 +838,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is rejected if not whitelisted @@ -895,7 +883,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -905,14 +893,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is accepted if whitelisted @@ -955,7 +939,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -965,14 +949,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a tx is rejected if not whitelisted @@ -1015,7 +995,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1025,14 +1005,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + !validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } #[test] @@ -1075,7 +1051,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![tx.raw_header_hash(), *tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1085,13 +1061,9 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx( - &CTX, - signed_tx, - vp_owner, - keys_changed, - verifiers - ) - .unwrap()); + assert!( + validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) + .unwrap() + ); } } From ca14bfc4b90b32718fb279646a0bcc8cd5c0585a Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 23 Oct 2023 12:02:11 +0200 Subject: [PATCH 154/161] Fixes raw header hash in compressed signature --- core/src/proto/types.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 98839ccf98..75bf5ad5bf 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -628,6 +628,9 @@ impl CompressedSignature { if idx == 0 { // The "zeroth" section is the header targets.push(tx.header_hash()); + } else if idx == 255 { + // The 255th section is the raw header + targets.push(tx.raw_header_hash()); } else { targets.push(tx.sections[idx as usize - 1].get_hash()); } From c40cbc1e2e66c6319179626e7aa5aeb0f2ef3204 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Oct 2023 17:48:04 +0200 Subject: [PATCH 155/161] Changelog #1867 --- .../unreleased/improvements/1867-fix-replay-protection.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1867-fix-replay-protection.md diff --git a/.changelog/unreleased/improvements/1867-fix-replay-protection.md b/.changelog/unreleased/improvements/1867-fix-replay-protection.md new file mode 100644 index 0000000000..ad22c70c55 --- /dev/null +++ b/.changelog/unreleased/improvements/1867-fix-replay-protection.md @@ -0,0 +1,2 @@ +- Reworked the signature of inner transactions to improve safety and fix replay + protection. 
([\#1867](https://github.com/anoma/namada/pull/1867)) \ No newline at end of file From 7e011228469c4c5b2ce1c715dfda0cd2b8c0ee01 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 23 Oct 2023 13:06:04 +0200 Subject: [PATCH 156/161] evil: updates calls to `raw_header_hash` --- .../lib/node/ledger/shell/finalize_block.rs | 281 ++++++++++-------- apps/src/lib/node/ledger/shell/mod.rs | 13 +- .../lib/node/ledger/shell/process_proposal.rs | 27 +- sdk/src/tx.rs | 8 +- shared/src/ledger/protocol/mod.rs | 6 +- 5 files changed, 182 insertions(+), 153 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 1f635133e6..4fef1fc7ef 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -930,13 +930,13 @@ where // Allow to replay a specific wasm transaction. Needs as argument the // corresponding wrapper transaction to avoid replay of that in the process - fn allow_tx_replay(&mut self, mut wrapper_tx: Tx) { + fn allow_tx_replay(&mut self, wrapper_tx: Tx) { self.wl_storage .write_tx_hash(wrapper_tx.header_hash()) .expect("Error while deleting tx hash from storage"); self.wl_storage - .delete_tx_hash(wrapper_tx.update_header(TxType::Raw).header_hash()) + .delete_tx_hash(wrapper_tx.raw_header_hash()) .expect("Error while deleting tx hash from storage"); } } @@ -2048,9 +2048,11 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. 
Make val2 the next block's @@ -2060,9 +2062,11 @@ mod test_finalize_block { assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!(!rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal let acc_sum = get_rewards_sum(&shell.wl_storage); @@ -2176,9 +2180,11 @@ mod test_finalize_block { None, ); } - assert!(rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + rewards_accumulator_handle() + .is_empty(&shell.wl_storage) + .unwrap() + ); let rp1 = rewards_prod_1 .get(&shell.wl_storage, &Epoch::default()) .unwrap() @@ -2230,12 +2236,10 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let mut decrypted_tx = wrapper_tx; - decrypted_tx.update_header(TxType::Raw); let decrypted_hash_key = replay_protection::get_replay_protection_last_key( - &decrypted_tx.header_hash(), + &wrapper_tx.raw_header_hash(), ); // merkle tree root before finalize_block @@ -2263,21 +2267,25 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!(shell - .shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_tx.header_hash()) - .unwrap_or_default()); + assert!( + shell + .shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) + .unwrap_or_default() + ); // Check that the hash is present in the merkle tree - assert!(!shell - .shell - .wl_storage - .storage - .block - .tree - .has_key(&decrypted_hash_key) - .unwrap()); + assert!( + !shell + .shell + .wl_storage + .storage + .block + .tree + 
.has_key(&decrypted_hash_key) + .unwrap() + ); } /// Test replay protection hash handling @@ -2321,16 +2329,9 @@ mod test_finalize_block { decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); decrypted_tx_2.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - let decrypted_hash = - wrapper_tx.clone().update_header(TxType::Raw).header_hash(); - let decrypted_2_hash = wrapper_tx_2 - .clone() - .update_header(TxType::Raw) - .header_hash(); - let decrypted_3_hash = invalid_wrapper_tx - .clone() - .update_header(TxType::Raw) - .header_hash(); + let decrypted_hash = wrapper_tx.raw_header_hash(); + let decrypted_2_hash = wrapper_tx_2.raw_header_hash(); + let decrypted_3_hash = invalid_wrapper_tx.raw_header_hash(); // Write inner hashes in storage for hash in [&decrypted_hash, &decrypted_2_hash] { @@ -2412,36 +2413,48 @@ mod test_finalize_block { .as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); - assert!(shell - .wl_storage - .write_log - .has_replay_protection_entry(&invalid_wrapper_hash) - .unwrap_or_default()); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_3_hash) - .unwrap_or_default()); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&decrypted_hash) - .unwrap_or_default()); - assert!(shell - .wl_storage - .write_log - .has_replay_protection_entry(&wrapper_hash) - .unwrap_or_default()); - assert!(shell - .wl_storage - .storage - .has_replay_protection_entry(&decrypted_2_hash) - .expect("test failed")); - assert!(!shell - .wl_storage - .write_log - .has_replay_protection_entry(&wrapper_2_hash) - .unwrap_or_default()); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&invalid_wrapper_hash) + .unwrap_or_default() + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_3_hash) + .unwrap_or_default() + ); + assert!( + !shell + .wl_storage + .write_log + 
.has_replay_protection_entry(&decrypted_hash) + .unwrap_or_default() + ); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_hash) + .unwrap_or_default() + ); + assert!( + shell + .wl_storage + .storage + .has_replay_protection_entry(&decrypted_2_hash) + .expect("test failed") + ); + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&wrapper_2_hash) + .unwrap_or_default() + ); } // Test that if the fee payer doesn't have enough funds for fee payment the @@ -2729,9 +2742,11 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.wl_storage)? + ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, Epoch::default()) .unwrap(), @@ -2750,17 +2765,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.wl_storage)? + ); assert_eq!( get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.wl_storage)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.wl_storage)? + ); // Advance to the processing epoch loop { @@ -2783,9 +2802,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)?); + assert!( + enqueued_slashes_handle() + .at(&shell.wl_storage.storage.block.epoch) + .is_empty(&shell.wl_storage)? 
+ ); let stake1 = read_validator_stake( &shell.wl_storage, ¶ms, @@ -3344,13 +3365,15 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.wl_storage, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); assert!( namada_proof_of_stake::validator_slashes_handle(&val1.address) .is_empty(&shell.wl_storage) @@ -3859,14 +3882,12 @@ mod test_finalize_block { assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); // assert!(!below_cap_val_set.at(&ep).is_empty(storage). // unwrap()); - assert!(!validator_positions - .at(&ep) - .is_empty(storage) - .unwrap()); - assert!(!all_validator_addresses - .at(&ep) - .is_empty(storage) - .unwrap()); + assert!( + !validator_positions.at(&ep).is_empty(storage).unwrap() + ); + assert!( + !all_validator_addresses.at(&ep).is_empty(storage).unwrap() + ); } }; @@ -3905,25 +3926,33 @@ mod test_finalize_block { Epoch(1), Epoch(params.pipeline_len + default_past_epochs + 1), ); - assert!(!consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); - assert!(validator_positions - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); - assert!(all_validator_addresses - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + validator_positions + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + all_validator_addresses + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); // Advance to the epoch `consensus_val_set_len` + 1 loop { - assert!(!consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); let votes = 
get_default_true_votes( &shell.wl_storage, shell.wl_storage.storage.block.epoch, @@ -3934,10 +3963,12 @@ mod test_finalize_block { } } - assert!(consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); // Advance one more epoch let votes = get_default_true_votes( @@ -3946,19 +3977,23 @@ mod test_finalize_block { ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); for ep in Epoch::default().iter_range(2) { - assert!(consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); } for ep in Epoch::iter_bounds_inclusive( Epoch(2), current_epoch + params.pipeline_len, ) { - assert!(!consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap()); + assert!( + !consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); } Ok(()) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index ce49a7f211..ab8ebd5b05 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -942,13 +942,12 @@ where ))); } - // Write wrapper hash to tx WAL + // Write wrapper hash to WAL temp_wl_storage .write_tx_hash(wrapper_hash) .map_err(|e| Error::ReplayAttempt(e.to_string()))?; - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_tx_hash = wrapper.raw_header_hash(); if temp_wl_storage .has_replay_protection_entry(&inner_tx_hash) .expect("Error while checking inner tx hash key in storage") @@ -959,7 +958,7 @@ where ))); } - // Write inner hash to tx WAL + // Write inner hash to WAL temp_wl_storage .write_tx_hash(inner_tx_hash) .map_err(|e| Error::ReplayAttempt(e.to_string())) @@ -1249,13 +1248,11 @@ where } // Replay protection check - let mut inner_tx = tx; - inner_tx.update_header(TxType::Raw); - let inner_tx_hash = 
&inner_tx.header_hash(); + let inner_tx_hash = tx.raw_header_hash(); if self .wl_storage .storage - .has_replay_protection_entry(inner_tx_hash) + .has_replay_protection_entry(&tx.raw_header_hash()) .expect("Error while checking inner tx hash key in storage") { response.code = ErrorCodes::ReplayTx.into(); diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index e05ae21fd0..a83bc7f62e 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1087,11 +1087,13 @@ mod test_process_proposal { shell.chain_id.clone(), ) .to_bytes(); - assert!(shell - .process_proposal(ProcessProposal { - txs: vec![tx.clone(), tx] - }) - .is_err()); + assert!( + shell + .process_proposal(ProcessProposal { + txs: vec![tx.clone(), tx] + }) + .is_err() + ); } #[cfg(feature = "abcipp")] @@ -1248,9 +1250,11 @@ mod test_process_proposal { sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); - let mut txs = vec![EthereumTxData::BridgePool(vote_ext.into()) - .sign(protocol_key, shell.chain_id.clone()) - .to_bytes()]; + let mut txs = vec![ + EthereumTxData::BridgePool(vote_ext.into()) + .sign(protocol_key, shell.chain_id.clone()) + .to_bytes(), + ]; let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), @@ -2176,15 +2180,12 @@ mod test_process_proposal { response[1].result.code, u32::from(ErrorCodes::ReplayTx) ); - // The checks happens on the inner hash first, so the tx is - // rejected because of this hash, not the - // wrapper one assert_eq!( response[1].result.info, format!( "Transaction replay attempt: Wrapper transaction hash \ {} already in storage", - wrapper.raw_header_hash() + wrapper.header_hash() ) ); } @@ -2247,7 +2248,7 @@ mod test_process_proposal { format!( "Transaction replay attempt: Inner transaction hash \ {} already in storage", - inner_unsigned_hash + wrapper.raw_header_hash() ) ); } diff --git a/sdk/src/tx.rs 
b/sdk/src/tx.rs index f1a85ebca8..962c6964a3 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -45,7 +45,7 @@ use namada_core::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada_core::types::transaction::pgf::UpdateStewardCommission; -use namada_core::types::transaction::{pos, TxType}; +use namada_core::types::transaction::pos; use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; @@ -200,11 +200,7 @@ pub async fn process_tx<'a>( let wrapper_hash = tx.header_hash().to_string(); // We use this to determine when the decrypted inner tx makes it // on-chain - let decrypted_hash = tx - .clone() - .update_header(TxType::Raw) - .header_hash() - .to_string(); + let decrypted_hash = tx.raw_header_hash().to_string(); let to_broadcast = TxBroadcastData::Live { tx, wrapper_hash, diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index dcbcb9b5ba..b8a9902ac5 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -217,7 +217,7 @@ where /// Returns the set of changed storage keys. The caller should write the hash of /// the wrapper header to storage in case of failure. 
pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( - mut tx: Tx, + tx: Tx, wrapper: &WrapperTx, fee_unshield_transaction: Option, tx_bytes: &[u8], @@ -247,10 +247,10 @@ where // If wrapper was succesful, write inner tx hash to storage shell_params .wl_storage - .write_tx_hash(tx.update_header(TxType::Raw).header_hash()) + .write_tx_hash(tx.raw_header_hash()) .expect("Error while writing tx hash to storage"); changed_keys.insert(replay_protection::get_replay_protection_last_key( - &tx.header_hash(), + &tx.raw_header_hash(), )); Ok(changed_keys) From b0e45abba5dd2f72e76a79baf1450561a8abfc2b Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 23 Oct 2023 13:48:48 +0200 Subject: [PATCH 157/161] evil: updates lock files --- Cargo.lock | 2 +- wasm/Cargo.lock | 2 +- wasm_for_tests/wasm_source/Cargo.lock | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 510f1973d1..0334476964 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4230,7 +4230,7 @@ dependencies = [ [[package]] name = "namada_sdk" -version = "0.23.0" +version = "0.23.1" dependencies = [ "assert_matches", "async-trait", diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 7ab11f5ab1..5ca9ccd5a1 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3409,7 +3409,7 @@ dependencies = [ [[package]] name = "namada_sdk" -version = "0.23.0" +version = "0.23.1" dependencies = [ "async-trait", "bimap", diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 327aa0f732..e91bd5c4d5 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -3409,7 +3409,7 @@ dependencies = [ [[package]] name = "namada_sdk" -version = "0.23.0" +version = "0.23.1" dependencies = [ "async-trait", "bimap", From adbc18597ea819ecf7da42d65d5ea4f6a7f55d42 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 23 Oct 2023 12:39:45 +0200 Subject: [PATCH 158/161] Now re-export crates that may be needed to use the SDK. 
--- sdk/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 622a63a1d1..2af1b9dae9 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -3,7 +3,10 @@ pub use namada_core::proto; pub use tendermint_rpc; #[cfg(feature = "tendermint-rpc-abcipp")] pub use tendermint_rpc_abcipp as tendermint_rpc; -pub use {bip39, namada_core as core, namada_proof_of_stake as proof_of_stake}; +pub use { + bip39, borsh, masp_primitives, masp_proofs, namada_core as core, + namada_proof_of_stake as proof_of_stake, zeroize, +}; #[cfg(feature = "abcipp")] pub use { ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, From abc49d63cb4323f372a49720945f290d1699ad9a Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Mon, 23 Oct 2023 12:46:59 +0200 Subject: [PATCH 159/161] Added changelog entry. --- .changelog/unreleased/SDK/2033-sdk-re-exports.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/SDK/2033-sdk-re-exports.md diff --git a/.changelog/unreleased/SDK/2033-sdk-re-exports.md b/.changelog/unreleased/SDK/2033-sdk-re-exports.md new file mode 100644 index 0000000000..049af49e8f --- /dev/null +++ b/.changelog/unreleased/SDK/2033-sdk-re-exports.md @@ -0,0 +1,2 @@ +- Now re-exporting crates that will commonly be used with the SDK. + ([\#2033](https://github.com/anoma/namada/pull/2033)) \ No newline at end of file From 7f3fc5ec4ded2d369a71f19b1e25b4c677cd4a03 Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 27 Sep 2023 17:48:09 +0200 Subject: [PATCH 160/161] Reintroduced a dummy field in order to achieve compatability with hardware wallet. 
--- .../lib/node/ledger/shell/process_proposal.rs | 3 ++- core/src/types/transaction/wrapper.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..9494069ce7 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1000,7 +1000,7 @@ mod test_process_proposal { use namada::types::token; use namada::types::token::Amount; use namada::types::transaction::protocol::EthereumTxData; - use namada::types::transaction::{Fee, WrapperTx}; + use namada::types::transaction::{Fee, Solution, WrapperTx}; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::bridge_pool_roots::MultiSignedVext; #[cfg(feature = "abcipp")] @@ -2005,6 +2005,7 @@ mod test_process_proposal { epoch: Epoch(0), gas_limit: GAS_LIMIT_MULTIPLIER.into(), unshield_section_hash: None, + pow_solution: Solution::None, }; let tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index e9b49b0c07..0e44eb660c 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -164,6 +164,21 @@ pub mod wrapper_tx { } } + /// A degenerate PoW solution type + #[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Serialize, + Deserialize, + )] + pub enum Solution { + /// No PoW solution + None, + } + /// A transaction with an encrypted payload, an optional shielded pool /// unshielding tx for fee payment and some non-encrypted metadata for /// inclusion and / or verification purposes @@ -190,6 +205,8 @@ pub mod wrapper_tx { /// The hash of the optional, unencrypted, unshielding transaction for /// fee payment pub unshield_section_hash: Option, + /// Mandatory 0x00 byte for deprecated field + pub pow_solution: Solution, } impl WrapperTx { @@ 
-211,6 +228,7 @@ pub mod wrapper_tx { epoch, gas_limit, unshield_section_hash: unshield_hash, + pow_solution: Solution::None, } } From 696c8beb04179899102fbcddf15d8c11ab90980c Mon Sep 17 00:00:00 2001 From: Murisi Tarusenga Date: Wed, 27 Sep 2023 17:56:20 +0200 Subject: [PATCH 161/161] Added changelog entry. --- .changelog/unreleased/bug-fixes/1949-pow-solution-fix.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/1949-pow-solution-fix.md diff --git a/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md b/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md new file mode 100644 index 0000000000..6addee1bdd --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md @@ -0,0 +1,2 @@ +- Reintroduced a dummy field in order to achieve compatibility with hardware + wallet. ([\#1949](https://github.com/anoma/namada/pull/1949)) \ No newline at end of file