diff --git a/Cargo.lock b/Cargo.lock index 8aba5b5646..a1ba5ac35b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,6 +3685,7 @@ version = "0.14.0" dependencies = [ "ark-serialize", "ark-std", + "assert_matches", "async-std", "async-trait", "base64 0.13.0", @@ -3715,6 +3716,7 @@ dependencies = [ "masp_proofs", "namada", "num-derive", + "num-rational", "num-traits 0.2.15", "num_cpus", "once_cell", diff --git a/apps/Cargo.toml b/apps/Cargo.toml index f1bf95a05d..01cc7910f8 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -75,6 +75,7 @@ ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", features = ["std", "borsh"]} +assert_matches = "1.5.0" async-std = {version = "=1.11.0", features = ["unstable"]} async-trait = "0.1.51" base64 = "0.13.0" @@ -100,6 +101,7 @@ itertools = "0.10.1" libc = "0.2.97" libloading = "0.7.2" num-derive = "0.3.3" +num-rational = "0.4.1" num-traits = "0.2.14" num_cpus = "1.13.0" once_cell = "1.8.0" diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc.rs b/apps/src/lib/node/ledger/shell/block_space_alloc.rs new file mode 100644 index 0000000000..0a11ba10e1 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc.rs @@ -0,0 +1,476 @@ +//! Primitives that facilitate keeping track of the number +//! of bytes utilized by some Tendermint consensus round's proposal. +//! +//! This is important, because Tendermint places an upper bound +//! on the size of a block, rejecting blocks whose size exceeds +//! the limit stated in [`RequestPrepareProposal`]. +//! +//! The code in this module doesn't perform any deserializing to +//! verify if we are, in fact, allocating space for the correct +//! kind of tx for the current [`BlockSpaceAllocator`] state. It +//! is up to `PrepareProposal` to dispatch the correct kind of tx +//! into the current state of the allocator. +//! +//! # How space is allocated +//! +//! In the current implementation, we allocate space for transactions +//! in the following order of preference: +//! +//! - First, we allot space for DKG encrypted txs. We allow DKG encrypted txs to +//! take up at most 1/3 of the total block space. +//! - Next, we allot space for DKG decrypted txs. Decrypted txs take up as much +//! space as needed. We will see, shortly, why in practice this is fine. +//! - Finally, we allot space for protocol txs. Protocol txs get half of the +//! remaining block space allotted to them. +//! +//! Since at some fixed height `H` decrypted txs only take up as +//! much space as the encrypted txs from height `H - 1`, and we +//! restrict the space of encrypted txs to at most 1/3 of the +//! total block space, we roughly divide the Tendermint block +//! space in 3, for each major type of tx. + +pub mod states; + +// TODO: what if a tx has a size greater than the threshold for +// its bin? how do we handle this? if we keep it in the mempool +// forever, it'll be a DoS vec, as we can make nodes run out of +// memory! maybe we should allow block decisions for txs that are +// too big to fit in their respective bin? in these special block +// decisions, we would only decide proposals with "large" txs?? +// +// MAYBE: in the state machine impl, reset to beginning state, and +// and alloc space for large tx right at the start. 
the problem with +// this is that then we may not have enough space for decrypted txs + +// TODO: panic if we don't have enough space reserved for a +// decrypted tx; in theory, we should always have enough space +// reserved for decrypted txs, given the invariants of the state +// machine + +// TODO: refactor our measure of space to also reflect gas costs. +// the total gas of all chosen txs cannot exceed the configured max +// gas per block, otherwise a proposal will be rejected! + +use std::marker::PhantomData; + +use namada::core::ledger::storage::{self, WlStorage}; +use namada::proof_of_stake::pos_queries::PosQueries; + +#[allow(unused_imports)] +use crate::facade::tendermint_proto::abci::RequestPrepareProposal; + +/// Block space allocation failure status responses. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum AllocFailure { + /// The transaction can only be included in an upcoming block. + /// + /// We return the space left in the tx bin for logging purposes. + Rejected { bin_space_left: u64 }, + /// The transaction would overflow the allotted bin space, + /// therefore it needs to be handled separately. + /// + /// We return the size of the tx bin for logging purposes. + OverflowsBin { bin_size: u64 }, +} + +/// Allotted space for a batch of transactions in some proposed block, +/// measured in bytes. +/// +/// We keep track of the current space utilized by: +/// +/// - DKG encrypted transactions. +/// - DKG decrypted transactions. +/// - Protocol transactions. +#[derive(Debug, Default)] +pub struct BlockSpaceAllocator { + /// The current state of the [`BlockSpaceAllocator`] state machine. + _state: PhantomData<*const State>, + /// The total space Tendermint has allotted to the + /// application for the current block height. + block: TxBin, + /// The current space utilized by protocol transactions. + protocol_txs: TxBin, + /// The current space utilized by DKG encrypted transactions. + encrypted_txs: TxBin, + /// The current space utilized by DKG decrypted transactions. + decrypted_txs: TxBin, +} + +impl From<&WlStorage> + for BlockSpaceAllocator> +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + #[inline] + fn from(storage: &WlStorage) -> Self { + Self::init(storage.pos_queries().get_max_proposal_bytes().get()) + } +} + +impl BlockSpaceAllocator> { + /// Construct a new [`BlockSpaceAllocator`], with an upper bound + /// on the max size of all txs in a block defined by Tendermint. + #[inline] + pub fn init(tendermint_max_block_space_in_bytes: u64) -> Self { + let max = tendermint_max_block_space_in_bytes; + Self { + _state: PhantomData, + block: TxBin::init(max), + protocol_txs: TxBin::default(), + encrypted_txs: TxBin::init_over_ratio(max, threshold::ONE_THIRD), + decrypted_txs: TxBin::default(), + } + } +} + +impl BlockSpaceAllocator { + /// Return the amount of space left to initialize in all + /// [`TxBin`] instances. + /// + /// This is calculated based on the difference between the Tendermint + /// block space for a given round and the sum of the allotted space + /// to each [`TxBin`] instance in a [`BlockSpaceAllocator`]. + #[inline] + fn uninitialized_space_in_bytes(&self) -> u64 { + let total_bin_space = self.protocol_txs.allotted_space_in_bytes + + self.encrypted_txs.allotted_space_in_bytes + + self.decrypted_txs.allotted_space_in_bytes; + self.block.allotted_space_in_bytes - total_bin_space + } +} + +/// Allotted space for a batch of transactions of the same kind in some +/// proposed block, measured in bytes. 
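+///
+/// # Example
+///
+/// An illustrative sketch of how a bin accepts and rejects txs; the
+/// byte sizes below are arbitrary:
+///
+/// ```ignore
+/// // a bin holding at most 1/3 of a hypothetical 90 byte block
+/// let mut bin = TxBin::init_over_ratio(90, threshold::ONE_THIRD);
+/// assert_eq!(bin.space_left_in_bytes(), 30);
+///
+/// // a 20 byte tx fits
+/// assert!(bin.try_dump(&[0; 20]).is_ok());
+///
+/// // a second 20 byte tx does not fit anymore, but could have
+/// // fit in an empty bin, so it is only rejected
+/// assert!(matches!(
+///     bin.try_dump(&[0; 20]),
+///     Err(AllocFailure::Rejected { .. })
+/// ));
+///
+/// // a 40 byte tx can never fit in this bin
+/// assert!(matches!(
+///     bin.try_dump(&[0; 40]),
+///     Err(AllocFailure::OverflowsBin { .. })
+/// ));
+/// ```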
+#[derive(Debug, Copy, Clone, Default)] +pub struct TxBin { + /// The current space utilized by the batch of transactions. + occupied_space_in_bytes: u64, + /// The maximum space the batch of transactions may occupy. + allotted_space_in_bytes: u64, +} + +impl TxBin { + /// Return a new [`TxBin`] with a total allotted space equal to the + /// floor of the fraction `frac` of the available block space `max_bytes`. + #[inline] + pub fn init_over_ratio(max_bytes: u64, frac: threshold::Threshold) -> Self { + let allotted_space_in_bytes = frac.over(max_bytes); + Self { + allotted_space_in_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Return the amount of space left in this [`TxBin`]. + #[inline] + pub fn space_left_in_bytes(&self) -> u64 { + self.allotted_space_in_bytes - self.occupied_space_in_bytes + } + + /// Construct a new [`TxBin`], with a capacity of `max_bytes`. + #[inline] + pub fn init(max_bytes: u64) -> Self { + Self { + allotted_space_in_bytes: max_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Shrink the allotted space of this [`TxBin`] to whatever + /// space is currently being utilized. + #[inline] + pub fn shrink_to_fit(&mut self) { + self.allotted_space_in_bytes = self.occupied_space_in_bytes; + } + + /// Try to dump a new transaction into this [`TxBin`]. + /// + /// Signal the caller if the tx is larger than its max + /// allotted bin space. + pub fn try_dump(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + let tx_len = tx.len() as u64; + if tx_len > self.allotted_space_in_bytes { + let bin_size = self.allotted_space_in_bytes; + return Err(AllocFailure::OverflowsBin { bin_size }); + } + let occupied = self.occupied_space_in_bytes + tx_len; + if occupied <= self.allotted_space_in_bytes { + self.occupied_space_in_bytes = occupied; + Ok(()) + } else { + let bin_space_left = self.space_left_in_bytes(); + Err(AllocFailure::Rejected { bin_space_left }) + } + } +} + +pub mod threshold { + //! Transaction allotment thresholds. + + use num_rational::Ratio; + + /// Threshold over a portion of block space. + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] + pub struct Threshold(Ratio); + + impl Threshold { + /// Return a new [`Threshold`]. + const fn new(numer: u64, denom: u64) -> Self { + // constrain ratio to a max of 1 + let numer = if numer > denom { denom } else { numer }; + Self(Ratio::new_raw(numer, denom)) + } + + /// Return a [`Threshold`] over some free space. + pub fn over(self, free_space_in_bytes: u64) -> u64 { + (self.0 * free_space_in_bytes).to_integer() + } + } + + /// Divide free space in three. + pub const ONE_THIRD: Threshold = Threshold::new(1, 3); +} + +#[cfg(test)] +mod tests { + use std::cell::RefCell; + + use assert_matches::assert_matches; + use proptest::prelude::*; + + use super::states::{ + BuildingEncryptedTxBatch, NextState, TryAlloc, WithEncryptedTxs, + WithoutEncryptedTxs, + }; + use super::*; + use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + + /// Convenience alias for a block space allocator at a state with encrypted + /// txs. + type BsaWrapperTxs = + BlockSpaceAllocator>; + + /// Convenience alias for a block space allocator at a state without + /// encrypted txs. + type BsaNoWrapperTxs = + BlockSpaceAllocator>; + + /// Proptest generated txs. 
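+    ///
+    /// Holds an arbitrary block size limit, together with lists of
+    /// protocol, encrypted and decrypted tx payloads, as generated
+    /// by [`arb_transactions`].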
+ #[derive(Debug)] + struct PropTx { + tendermint_max_block_space_in_bytes: u64, + protocol_txs: Vec, + encrypted_txs: Vec, + decrypted_txs: Vec, + } + + /// Check that at most 1/3 of the block space is + /// reserved for each kind of tx type, in the + /// allocator's common path. + #[test] + fn test_txs_are_evenly_split_across_block() { + const BLOCK_SIZE: u64 = 60; + + // reserve block space for encrypted txs + let mut alloc = BsaWrapperTxs::init(BLOCK_SIZE); + + // allocate ~1/3 of the block space to encrypted txs + assert!(alloc.try_alloc(&[0; 18]).is_ok()); + + // reserve block space for decrypted txs + let mut alloc = alloc.next_state(); + + // the space we allotted to encrypted txs was shrunk to + // the total space we actually used up + assert_eq!(alloc.encrypted_txs.allotted_space_in_bytes, 18); + + // check that the allotted space for decrypted txs is correct + assert_eq!( + alloc.decrypted_txs.allotted_space_in_bytes, + BLOCK_SIZE - 18 + ); + + // add about ~1/3 worth of decrypted txs + assert!(alloc.try_alloc(&[0; 17]).is_ok()); + + // reserve block space for protocol txs + let mut alloc = alloc.next_state(); + + // check that space was shrunk + assert_eq!( + alloc.protocol_txs.allotted_space_in_bytes, + BLOCK_SIZE - (18 + 17) + ); + + // add protocol txs to the block space allocator + assert!(alloc.try_alloc(&[0; 25]).is_ok()); + + // the block should be full at this point + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + // Test that we cannot include encrypted txs in a block + // when the state invariants banish them from inclusion. + #[test] + fn test_encrypted_txs_are_rejected() { + let mut alloc = BsaNoWrapperTxs::init(1234); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + proptest! { + /// Check if we reject a tx when its respective bin + /// capacity has been reached on a [`BlockSpaceAllocator`]. + #[test] + fn test_reject_tx_on_bin_cap_reached(max in prop::num::u64::ANY) { + proptest_reject_tx_on_bin_cap_reached(max) + } + + /// Check if the initial bin capcity of the [`BlockSpaceAllocator`] + /// is correct. + #[test] + fn test_initial_bin_capacity(max in prop::num::u64::ANY) { + proptest_initial_bin_capacity(max) + } + + /// Test that dumping txs whose total combined size + /// is less than the bin cap does not fill up the bin. + #[test] + fn test_tx_dump_doesnt_fill_up_bin(args in arb_transactions()) { + proptest_tx_dump_doesnt_fill_up_bin(args) + } + } + + /// Implementation of [`test_reject_tx_on_bin_cap_reached`]. + fn proptest_reject_tx_on_bin_cap_reached( + tendermint_max_block_space_in_bytes: u64, + ) { + let mut bins = BsaWrapperTxs::init(tendermint_max_block_space_in_bytes); + + // fill the entire bin of encrypted txs + bins.encrypted_txs.occupied_space_in_bytes = + bins.encrypted_txs.allotted_space_in_bytes; + + // make sure we can't dump any new encrypted txs in the bin + assert_matches!( + bins.try_alloc(b"arbitrary tx bytes"), + Err(AllocFailure::Rejected { .. }) + ); + } + + /// Implementation of [`test_initial_bin_capacity`]. + fn proptest_initial_bin_capacity(tendermint_max_block_space_in_bytes: u64) { + let bins = BsaWrapperTxs::init(tendermint_max_block_space_in_bytes); + let expected = tendermint_max_block_space_in_bytes + - threshold::ONE_THIRD.over(tendermint_max_block_space_in_bytes); + assert_eq!(expected, bins.uninitialized_space_in_bytes()); + } + + /// Implementation of [`test_tx_dump_doesnt_fill_up_bin`]. 
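+    ///
+    /// Txs are dumped into the encrypted, decrypted and protocol bins,
+    /// in this order, stopping just short of filling each bin up, so
+    /// every allocation below is expected to succeed.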
+ fn proptest_tx_dump_doesnt_fill_up_bin(args: PropTx) { + let PropTx { + tendermint_max_block_space_in_bytes, + protocol_txs, + encrypted_txs, + decrypted_txs, + } = args; + + // produce new txs until the moment we would have + // filled up the bins. + // + // iterate over the produced txs to make sure we can keep + // dumping new txs without filling up the bins + + let bins = RefCell::new(BsaWrapperTxs::init( + tendermint_max_block_space_in_bytes, + )); + let encrypted_txs = encrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().encrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in encrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = RefCell::new(bins.into_inner().next_state()); + let decrypted_txs = decrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().decrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in decrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = RefCell::new(bins.into_inner().next_state()); + let protocol_txs = protocol_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().protocol_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in protocol_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + } + + prop_compose! { + /// Generate arbitrarily sized txs of different kinds. + fn arb_transactions() + // create base strategies + ( + (tendermint_max_block_space_in_bytes, protocol_tx_max_bin_size, encrypted_tx_max_bin_size, + decrypted_tx_max_bin_size) in arb_max_bin_sizes(), + ) + // compose strategies + ( + tendermint_max_block_space_in_bytes in Just(tendermint_max_block_space_in_bytes), + protocol_txs in arb_tx_list(protocol_tx_max_bin_size), + encrypted_txs in arb_tx_list(encrypted_tx_max_bin_size), + decrypted_txs in arb_tx_list(decrypted_tx_max_bin_size), + ) + -> PropTx { + PropTx { + tendermint_max_block_space_in_bytes, + protocol_txs, + encrypted_txs, + decrypted_txs, + } + } + } + + /// Return random bin sizes for a [`BlockSpaceAllocator`]. + fn arb_max_bin_sizes() -> impl Strategy + { + const MAX_BLOCK_SIZE_BYTES: u64 = 1000; + (1..=MAX_BLOCK_SIZE_BYTES).prop_map( + |tendermint_max_block_space_in_bytes| { + ( + tendermint_max_block_space_in_bytes, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + ) + }, + ) + } + + /// Return a list of txs. + fn arb_tx_list(max_bin_size: usize) -> impl Strategy>> { + const MAX_TX_NUM: usize = 64; + let tx = prop::collection::vec(prop::num::u8::ANY, 0..=max_bin_size); + prop::collection::vec(tx, 0..=MAX_TX_NUM) + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs new file mode 100644 index 0000000000..18712998e3 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs @@ -0,0 +1,119 @@ +//! All the states of the [`BlockSpaceAllocator`] state machine, +//! over the extent of a Tendermint consensus round +//! block proposal. +//! +//! # States +//! +//! The state machine moves through the following state DAG: +//! +//! 1. [`BuildingEncryptedTxBatch`] - the initial state. In +//! 
this state, we populate a block with DKG encrypted txs. +//! This state supports two modes of operation, which you can +//! think of as two sub-states: +//! * [`WithoutEncryptedTxs`] - When this mode is active, no encrypted txs are +//! included in a block proposal. +//! * [`WithEncryptedTxs`] - When this mode is active, we are able to include +//! encrypted txs in a block proposal. +//! 2. [`BuildingDecryptedTxBatch`] - the second state. In +//! this state, we populate a block with DKG decrypted txs. +//! 3. [`BuildingProtocolTxBatch`] - the third state. In +//! this state, we populate a block with protocol txs. + +mod decrypted_txs; +mod encrypted_txs; +mod protocol_txs; + +use super::{AllocFailure, BlockSpaceAllocator}; + +/// Convenience wrapper for a [`BlockSpaceAllocator`] state that allocates +/// encrypted transactions. +#[allow(dead_code)] +pub enum EncryptedTxBatchAllocator { + WithEncryptedTxs( + BlockSpaceAllocator>, + ), + WithoutEncryptedTxs( + BlockSpaceAllocator>, + ), +} + +/// The leader of the current Tendermint round is building +/// a new batch of DKG decrypted transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum BuildingDecryptedTxBatch {} + +/// The leader of the current Tendermint round is building +/// a new batch of Namada protocol transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum BuildingProtocolTxBatch {} + +/// The leader of the current Tendermint round is building +/// a new batch of DKG encrypted transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub struct BuildingEncryptedTxBatch { + /// One of [`WithEncryptedTxs`] and [`WithoutEncryptedTxs`]. + _mode: Mode, +} + +/// Allow block proposals to include encrypted txs. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum WithEncryptedTxs {} + +/// Prohibit block proposals from including encrypted txs. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum WithoutEncryptedTxs {} + +/// Try to allocate a new transaction on a [`BlockSpaceAllocator`] state. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait TryAlloc { + /// Try to allocate space for a new transaction. + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure>; +} + +/// Represents a state transition in the [`BlockSpaceAllocator`] state machine. +/// +/// This trait should not be used directly. Instead, consider using one of +/// [`NextState`], [`NextStateWithEncryptedTxs`] or +/// [`NextStateWithoutEncryptedTxs`]. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait NextStateImpl { + /// The next state in the [`BlockSpaceAllocator`] state machine. + type Next; + + /// Transition to the next state in the [`BlockSpaceAllocator`] state + /// machine. + fn next_state_impl(self) -> Self::Next; +} + +/// Convenience extension of [`NextStateImpl`], to transition to a new +/// state with a null transition function. 
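+///
+/// # Example
+///
+/// A rough sketch of the usual walk through the state DAG, assuming
+/// `max` holds the block space granted to us by Tendermint:
+///
+/// ```ignore
+/// // start in `BuildingEncryptedTxBatch<WithEncryptedTxs>`
+/// let mut alloc = BlockSpaceAllocator::init(max);
+/// // ...allocate encrypted txs with `try_alloc`...
+///
+/// // transition to `BuildingDecryptedTxBatch`
+/// let mut alloc = alloc.next_state();
+/// // ...allocate decrypted txs...
+///
+/// // transition to `BuildingProtocolTxBatch`
+/// let mut alloc = alloc.next_state();
+/// // ...allocate protocol txs...
+/// ```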
+/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait NextState: NextStateImpl { + /// Transition to the next state in the [`BlockSpaceAllocator`] state, + /// using a null transiiton function. + #[inline] + fn next_state(self) -> Self::Next + where + Self: Sized, + { + self.next_state_impl() + } +} + +impl NextState for S where S: NextStateImpl {} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs new file mode 100644 index 0000000000..7fd15671a3 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs @@ -0,0 +1,43 @@ +use std::marker::PhantomData; + +use super::super::{AllocFailure, BlockSpaceAllocator, TxBin}; +use super::{ + BuildingDecryptedTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc, +}; + +impl TryAlloc for BlockSpaceAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.decrypted_txs.try_dump(tx) + } +} + +impl NextStateImpl for BlockSpaceAllocator { + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(mut self) -> Self::Next { + self.decrypted_txs.shrink_to_fit(); + + // the remaining space is allocated to protocol txs + let remaining_free_space = self.uninitialized_space_in_bytes(); + self.protocol_txs = TxBin::init(remaining_free_space); + + // cast state + let Self { + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + .. + } = self; + + BlockSpaceAllocator { + _state: PhantomData, + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + } + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs new file mode 100644 index 0000000000..f5fb2447ff --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs @@ -0,0 +1,110 @@ +use std::marker::PhantomData; + +use super::super::{AllocFailure, BlockSpaceAllocator, TxBin}; +use super::{ + BuildingDecryptedTxBatch, BuildingEncryptedTxBatch, + EncryptedTxBatchAllocator, NextStateImpl, TryAlloc, WithEncryptedTxs, + WithoutEncryptedTxs, +}; + +impl TryAlloc + for BlockSpaceAllocator> +{ + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.encrypted_txs.try_dump(tx) + } +} + +impl NextStateImpl + for BlockSpaceAllocator> +{ + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + next_state(self) + } +} + +impl TryAlloc + for BlockSpaceAllocator> +{ + #[inline] + fn try_alloc(&mut self, _tx: &[u8]) -> Result<(), AllocFailure> { + Err(AllocFailure::Rejected { bin_space_left: 0 }) + } +} + +impl NextStateImpl + for BlockSpaceAllocator> +{ + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + next_state(self) + } +} + +#[inline] +fn next_state( + mut alloc: BlockSpaceAllocator>, +) -> BlockSpaceAllocator { + alloc.encrypted_txs.shrink_to_fit(); + + // decrypted txs can use as much space as they need - which + // in practice will only be, at most, 1/3 of the block space + // used by encrypted txs at the prev height + let remaining_free_space = alloc.uninitialized_space_in_bytes(); + alloc.decrypted_txs = TxBin::init(remaining_free_space); + + // cast state + let BlockSpaceAllocator { + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + .. 
+ } = alloc; + + BlockSpaceAllocator { + _state: PhantomData, + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + } +} + +impl TryAlloc for EncryptedTxBatchAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + match self { + EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { + state.try_alloc(tx) + } + EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { + // NOTE: this operation will cause the allocator to + // run out of memory immediately + state.try_alloc(tx) + } + } + } +} + +impl NextStateImpl for EncryptedTxBatchAllocator { + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + match self { + EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { + state.next_state_impl() + } + EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { + state.next_state_impl() + } + } + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs new file mode 100644 index 0000000000..bc31717ddd --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs @@ -0,0 +1,9 @@ +use super::super::{AllocFailure, BlockSpaceAllocator}; +use super::{BuildingProtocolTxBatch, TryAlloc}; + +impl TryAlloc for BlockSpaceAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.protocol_txs.try_dump(tx) + } +} diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 6b4b05b5ad..f90dde5862 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -5,6 +5,7 @@ //! and [`Shell::process_proposal`] must be also reverted //! (unless we can simply overwrite them in the next block). //! More info in . +mod block_space_alloc; mod finalize_block; mod governance; mod init_chain; @@ -120,7 +121,7 @@ impl From for TxResult { /// The different error codes that the ledger may /// send back to a client indicating the status /// of their submitted tx -#[derive(Debug, Clone, FromPrimitive, ToPrimitive, PartialEq)] +#[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq)] pub enum ErrorCodes { Ok = 0, InvalidTx = 1, @@ -129,6 +130,21 @@ pub enum ErrorCodes { InvalidOrder = 4, ExtraTxs = 5, Undecryptable = 6, + AllocationError = 7, +} + +impl ErrorCodes { + /// Checks if the given [`ErrorCodes`] value is a protocol level error, + /// that can be recovered from at the finalize block stage. + pub const fn is_recoverable(&self) -> bool { + use ErrorCodes::*; + // NOTE: pattern match on all `ErrorCodes` variants, in order + // to catch potential bugs when adding new codes + match self { + Ok | InvalidTx | InvalidSig | WasmRuntimeError => true, + InvalidOrder | ExtraTxs | Undecryptable | AllocationError => false, + } + } } impl From for u32 { diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 1783a127fd..cf73517eb9 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1,6 +1,8 @@ //! 
Implementation of the [`RequestPrepareProposal`] ABCI++ method for the Shell +use namada::core::hints; use namada::ledger::storage::{DBIter, StorageHasher, DB}; +use namada::proof_of_stake::pos_queries::PosQueries; use namada::proto::Tx; use namada::types::internal::WrapperTxInQueue; use namada::types::transaction::tx_types::TxType; @@ -8,18 +10,18 @@ use namada::types::transaction::wrapper::wrapper_tx::PairingEngine; use namada::types::transaction::{AffineCurve, DecryptedTx, EllipticCurve}; use super::super::*; -use crate::facade::tendermint_proto::abci::RequestPrepareProposal; +#[allow(unused_imports)] +use super::block_space_alloc; +use super::block_space_alloc::states::{ + BuildingDecryptedTxBatch, BuildingProtocolTxBatch, + EncryptedTxBatchAllocator, NextState, TryAlloc, +}; +use super::block_space_alloc::{AllocFailure, BlockSpaceAllocator}; #[cfg(feature = "abcipp")] -use crate::facade::tendermint_proto::abci::{tx_record::TxAction, TxRecord}; +use crate::facade::tendermint_proto::abci::ExtendedCommitInfo; +use crate::facade::tendermint_proto::abci::RequestPrepareProposal; use crate::node::ledger::shell::{process_tx, ShellMode}; -use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; - -// TODO: remove this hard-coded value; Tendermint, and thus -// Namada uses 20 MiB max block sizes by default; 5 MiB leaves -// plenty of room for header data, evidence and protobuf serialization -// overhead -const MAX_PROPOSAL_SIZE: usize = 5 << 20; -const HALF_MAX_PROPOSAL_SIZE: usize = MAX_PROPOSAL_SIZE / 2; +use crate::node::ledger::shims::abcipp_shim_types::shim::{response, TxBytes}; impl Shell where @@ -28,10 +30,8 @@ where { /// Begin a new block. /// - /// We fill half the block space with new wrapper txs given to us - /// from the mempool by tendermint. The rest of the block is filled - /// with decryptions of the wrapper txs from the previously - /// committed block. + /// Block construction is documented in [`block_space_alloc`] + /// and [`block_space_alloc::states`]. /// /// INVARIANT: Any changes applied in this method must be reverted if /// the proposal is rejected (unless we can simply overwrite @@ -41,64 +41,155 @@ where req: RequestPrepareProposal, ) -> response::PrepareProposal { let txs = if let ShellMode::Validator { .. 
} = self.mode { - // TODO: This should not be hardcoded - let privkey = ::G2Affine::prime_subgroup_generator(); + // start counting allotted space for txs + let alloc = self.get_encrypted_txs_allocator(); - // TODO: Craft the Ethereum state update tx - // filter in half of the new txs from Tendermint, only keeping - // wrappers - let mut total_proposal_size = 0; - #[cfg(feature = "abcipp")] - let mut txs: Vec = req - .txs - .into_iter() - .map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - record::keep(tx_bytes) - } else { - record::remove(tx_bytes) - } - }) - .take_while(|tx_record| { - let new_size = total_proposal_size + tx_record.tx.len(); - if new_size > HALF_MAX_PROPOSAL_SIZE - || tx_record.action != TxAction::Unmodified as i32 - { - false - } else { - total_proposal_size = new_size; - true - } - }) - .collect(); - #[cfg(not(feature = "abcipp"))] - let mut txs: Vec = req - .txs - .into_iter() - .filter_map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - Some(tx_bytes) - } else { - None - } - }) - .take_while(|tx_bytes| { - let new_size = total_proposal_size + tx_bytes.len(); - if new_size > HALF_MAX_PROPOSAL_SIZE { - false - } else { - total_proposal_size = new_size; - true - } - }) - .collect(); + // add encrypted txs + let (encrypted_txs, alloc) = + self.build_encrypted_txs(alloc, &req.txs); + let mut txs = encrypted_txs; // decrypt the wrapper txs included in the previous block - let decrypted_txs = self.wl_storage.storage.tx_queue.iter().map( + let (mut decrypted_txs, alloc) = self.build_decrypted_txs(alloc); + txs.append(&mut decrypted_txs); + + // add vote extension protocol txs + let mut protocol_txs = self.build_protocol_txs( + alloc, + #[cfg(feature = "abcipp")] + req.local_last_commit, + #[cfg(not(feature = "abcipp"))] + &req.txs, + ); + txs.append(&mut protocol_txs); + + txs + } else { + vec![] + }; + + tracing::info!( + height = req.height, + num_of_txs = txs.len(), + "Proposing block" + ); + + response::PrepareProposal { txs } + } + + /// Depending on the current block height offset within the epoch, + /// transition state accordingly, return a block space allocator + /// with or without encrypted txs. + /// + /// # How to determine which path to take in the states DAG + /// + /// If we are at the second or third block height offset within an + /// epoch, we do not allow encrypted transactions to be included in + /// a block, therefore we return an allocator wrapped in an + /// [`EncryptedTxBatchAllocator::WithoutEncryptedTxs`] value. + /// Otherwise, we return an allocator wrapped in an + /// [`EncryptedTxBatchAllocator::WithEncryptedTxs`] value. + #[inline] + fn get_encrypted_txs_allocator(&self) -> EncryptedTxBatchAllocator { + let pos_queries = self.wl_storage.pos_queries(); + + let is_2nd_height_off = pos_queries.is_deciding_offset_within_epoch(1); + let is_3rd_height_off = pos_queries.is_deciding_offset_within_epoch(2); + + if hints::unlikely(is_2nd_height_off || is_3rd_height_off) { + tracing::warn!( + proposal_height = + ?pos_queries.get_current_decision_height(), + "No mempool txs are being included in the current proposal" + ); + EncryptedTxBatchAllocator::WithoutEncryptedTxs( + (&self.wl_storage).into(), + ) + } else { + EncryptedTxBatchAllocator::WithEncryptedTxs( + (&self.wl_storage).into(), + ) + } + } + + /// Builds a batch of encrypted transactions, retrieved from + /// Tendermint's mempool. 
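+    ///
+    /// Mempool txs that do not deserialize to a [`TxType::Wrapper`] are
+    /// filtered out, and allocation is attempted for each remaining tx;
+    /// once the encrypted tx bin rejects a tx for lack of space, no
+    /// further encrypted txs are added to the batch.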
+ fn build_encrypted_txs( + &self, + mut alloc: EncryptedTxBatchAllocator, + txs: &[TxBytes], + ) -> (Vec, BlockSpaceAllocator) { + let pos_queries = self.wl_storage.pos_queries(); + let txs = txs + .iter() + .filter_map(|tx_bytes| { + if let Ok(Ok(TxType::Wrapper(_))) = + Tx::try_from(tx_bytes.as_slice()).map(process_tx) + { + Some(tx_bytes.clone()) + } else { + None + } + }) + .take_while(|tx_bytes| { + alloc.try_alloc(&tx_bytes[..]) + .map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + tracing::debug!( + tx_bytes_len = tx_bytes.len(), + bin_space_left, + proposal_height = + ?pos_queries.get_current_decision_height(), + "Dropping encrypted tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + // TODO: handle tx whose size is greater + // than bin size + tracing::warn!( + tx_bytes_len = tx_bytes.len(), + bin_size, + proposal_height = + ?pos_queries.get_current_decision_height(), + "Dropping large encrypted tx from the current proposal", + ); + true + } + }, + |()| true, + ) + }) + .collect(); + let alloc = alloc.next_state(); + + (txs, alloc) + } + + /// Builds a batch of DKG decrypted transactions. + // NOTE: we won't have frontrunning protection until V2 of the + // Anoma protocol; Namada runs V1, therefore this method is + // essentially a NOOP + // + // sources: + // - https://specs.namada.net/main/releases/v2.html + // - https://github.com/anoma/ferveo + fn build_decrypted_txs( + &self, + mut alloc: BlockSpaceAllocator, + ) -> (Vec, BlockSpaceAllocator) { + // TODO: This should not be hardcoded + let privkey = + ::G2Affine::prime_subgroup_generator(); + + let pos_queries = self.wl_storage.pos_queries(); + let txs = self + .wl_storage + .storage + .tx_queue + .iter() + .map( |WrapperTxInQueue { tx, #[cfg(not(feature = "mainnet"))] @@ -114,63 +205,52 @@ where }) .to_bytes() }, - ); - #[cfg(feature = "abcipp")] - let mut decrypted_txs: Vec<_> = - decrypted_txs.map(record::add).collect(); - #[cfg(not(feature = "abcipp"))] - let mut decrypted_txs: Vec<_> = decrypted_txs.collect(); - - txs.append(&mut decrypted_txs); - txs - } else { - vec![] - }; - - #[cfg(feature = "abcipp")] - { - response::PrepareProposal { - tx_records: txs, - ..Default::default() - } - } - #[cfg(not(feature = "abcipp"))] - { - response::PrepareProposal { txs } - } - } -} - -/// Functions for creating the appropriate TxRecord given the -/// numeric code -#[cfg(feature = "abcipp")] -pub(super) mod record { - use super::*; - - /// Keep this transaction in the proposal - pub fn keep(tx: TxBytes) -> TxRecord { - TxRecord { - action: TxAction::Unmodified as i32, - tx, - } - } + ) + // TODO: make sure all decrypted txs are accepted + .take_while(|tx_bytes| { + alloc.try_alloc(&tx_bytes[..]).map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + tracing::warn!( + tx_bytes_len = tx_bytes.len(), + bin_space_left, + proposal_height = + ?pos_queries.get_current_decision_height(), + "Dropping decrypted tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + tracing::warn!( + tx_bytes_len = tx_bytes.len(), + bin_size, + proposal_height = + ?pos_queries.get_current_decision_height(), + "Dropping large decrypted tx from the current proposal", + ); + true + } + }, + |()| true, + ) + }) + .collect(); + let alloc = alloc.next_state(); - /// A transaction added to the proposal not provided by - /// Tendermint from the mempool - pub fn add(tx: TxBytes) -> TxRecord { - TxRecord { - 
action: TxAction::Added as i32, - tx, - } + (txs, alloc) } - /// Remove this transaction from the set provided - /// by Tendermint from the mempool - pub fn remove(tx: TxBytes) -> TxRecord { - TxRecord { - action: TxAction::Removed as i32, - tx, - } + /// Builds a batch of protocol transactions. + fn build_protocol_txs( + &self, + _alloc: BlockSpaceAllocator, + #[cfg(feature = "abcipp")] _local_last_commit: Option< + ExtendedCommitInfo, + >, + #[cfg(not(feature = "abcipp"))] _txs: &[TxBytes], + ) -> Vec { + // no protocol txs are implemented yet + vec![] } } @@ -181,29 +261,22 @@ mod test_prepare_proposal { use namada::types::transaction::{Fee, WrapperTx}; use super::*; - use crate::node::ledger::shell::test_utils::{gen_keypair, TestShell}; + use crate::node::ledger::shell::test_utils::{self, gen_keypair}; /// Test that if a tx from the mempool is not a /// WrapperTx type, it is not included in the /// proposed block. #[test] fn test_prepare_proposal_rejects_non_wrapper_tx() { - let (shell, _) = TestShell::new(); + let (shell, _) = test_utils::setup(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), ); let req = RequestPrepareProposal { txs: vec![tx.to_bytes()], - max_tx_bytes: 0, ..Default::default() }; - #[cfg(feature = "abcipp")] - assert_eq!( - shell.prepare_proposal(req).tx_records, - vec![record::remove(tx.to_bytes())] - ); - #[cfg(not(feature = "abcipp"))] assert!(shell.prepare_proposal(req).txs.is_empty()); } @@ -212,7 +285,7 @@ mod test_prepare_proposal { /// we simply exclude it from the proposal #[test] fn test_error_in_processing_tx() { - let (shell, _) = TestShell::new(); + let (shell, _) = test_utils::setup(); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -243,15 +316,8 @@ mod test_prepare_proposal { #[allow(clippy::redundant_clone)] let req = RequestPrepareProposal { txs: vec![wrapper.clone()], - max_tx_bytes: 0, ..Default::default() }; - #[cfg(feature = "abcipp")] - assert_eq!( - shell.prepare_proposal(req).tx_records, - vec![record::remove(wrapper)] - ); - #[cfg(not(feature = "abcipp"))] assert!(shell.prepare_proposal(req).txs.is_empty()); } @@ -260,14 +326,13 @@ mod test_prepare_proposal { /// corresponding wrappers #[test] fn test_decrypted_txs_in_correct_order() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = gen_keypair(); let mut expected_wrapper = vec![]; let mut expected_decrypted = vec![]; let mut req = RequestPrepareProposal { txs: vec![], - max_tx_bytes: 0, ..Default::default() }; // create a request with two new wrappers from mempool and @@ -300,58 +365,26 @@ mod test_prepare_proposal { expected_wrapper.push(wrapper.clone()); req.txs.push(wrapper.to_bytes()); } - // we extract the inner data from the txs for testing - // equality since otherwise changes in timestamps would - // fail the test - expected_wrapper.append(&mut expected_decrypted); - let expected_txs: Vec> = expected_wrapper - .iter() - .map(|tx| tx.data.clone().expect("Test failed")) + let expected_txs: Vec = expected_wrapper + .into_iter() + .chain(expected_decrypted.into_iter()) + // we extract the inner data from the txs for testing + // equality since otherwise changes in timestamps would + // fail the test + .map(|tx| tx.data.expect("Test failed")) .collect(); - #[cfg(feature = "abcipp")] - { - let received: Vec> = shell - .prepare_proposal(req) - .tx_records - .iter() - .filter_map( - |TxRecord { - tx: tx_bytes, - action, - }| { - if *action == 
(TxAction::Unmodified as i32) - || *action == (TxAction::Added as i32) - { - Some( - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed"), - ) - } else { - None - } - }, - ) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } - #[cfg(not(feature = "abcipp"))] - { - let received: Vec> = shell - .prepare_proposal(req) - .txs - .into_iter() - .map(|tx_bytes| { - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed") - }) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } + let received: Vec = shell + .prepare_proposal(req) + .txs + .into_iter() + .map(|tx_bytes| { + Tx::try_from(tx_bytes.as_slice()) + .expect("Test failed") + .data + .expect("Test failed") + }) + .collect(); + // check that the order of the txs is correct + assert_eq!(received, expected_txs); } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 11deec9e13..90c626d1b2 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,12 +1,58 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! and [`RevertProposal`] ABCI++ methods for the Shell +use data_encoding::HEXUPPER; +use namada::core::hints; +use namada::core::ledger::storage::WlStorage; +use namada::proof_of_stake::pos_queries::PosQueries; use namada::types::internal::WrapperTxInQueue; use super::*; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::RequestProcessProposal; +use crate::node::ledger::shell::block_space_alloc::{ + threshold, AllocFailure, TxBin, +}; use crate::node::ledger::shims::abcipp_shim_types::shim::response::ProcessProposal; +use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + +/// Validation metadata, to keep track of used resources or +/// transaction numbers, in a block proposal. +#[derive(Default)] +pub struct ValidationMeta { + /// Space utilized by encrypted txs. + pub encrypted_txs_bin: TxBin, + /// Space utilized by all txs. + pub txs_bin: TxBin, + /// Check if the decrypted tx queue has any elements + /// left. + /// + /// This field will only evaluate to true if a block + /// proposer didn't include all decrypted txs in a block. + pub decrypted_queue_has_remaining_txs: bool, + /// Check if a block has decrypted txs. + pub has_decrypted_txs: bool, +} + +impl From<&WlStorage> for ValidationMeta +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn from(storage: &WlStorage) -> Self { + let max_proposal_bytes = + storage.pos_queries().get_max_proposal_bytes().get(); + let encrypted_txs_bin = + TxBin::init_over_ratio(max_proposal_bytes, threshold::ONE_THIRD); + let txs_bin = TxBin::init(max_proposal_bytes); + Self { + decrypted_queue_has_remaining_txs: false, + has_decrypted_txs: false, + encrypted_txs_bin, + txs_bin, + } + } +} impl Shell where @@ -25,30 +71,79 @@ where /// but we only reject the entire block if the order of the /// included txs violates the order decided upon in the previous /// block. 
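+    ///
+    /// In addition, a proposal is rejected if any of its txs yields a
+    /// non-recoverable error code (e.g. an allocation error, when a tx
+    /// does not fit in the block), or if decrypted txs from the
+    /// previous height were left out of the block.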
+ // TODO: add block space alloc validation logic to ProcessProposal pub fn process_proposal( &self, req: RequestProcessProposal, ) -> ProcessProposal { - let tx_results = self.process_txs(&req.txs); + let (tx_results, metadata) = self.process_txs(&req.txs); + + // Erroneous transactions were detected when processing + // the leader's proposal. We allow txs that do not + // deserialize properly, that have invalid signatures + // and that have invalid wasm code to reach FinalizeBlock. + let invalid_txs = tx_results.iter().any(|res| { + let error = ErrorCodes::from_u32(res.code).expect( + "All error codes returned from process_single_tx are valid", + ); + !error.is_recoverable() + }); + if invalid_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Found invalid transactions, proposed block will be rejected" + ); + } + + let has_remaining_decrypted_txs = + metadata.decrypted_queue_has_remaining_txs; + if has_remaining_decrypted_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Not all decrypted txs from the previous height were included in + the proposal, the block will be rejected" + ); + } + + let will_reject_proposal = invalid_txs || has_remaining_decrypted_txs; + + let status = if will_reject_proposal { + ProposalStatus::Reject + } else { + ProposalStatus::Accept + }; ProcessProposal { - status: if tx_results.iter().any(|res| res.code > 3) { - ProposalStatus::Reject as i32 - } else { - ProposalStatus::Accept as i32 - }, + status: status as i32, tx_results, } } /// Check all the given txs. - pub fn process_txs(&self, txs: &[Vec]) -> Vec { + pub fn process_txs( + &self, + txs: &[TxBytes], + ) -> (Vec, ValidationMeta) { let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); - txs.iter() + let mut metadata = ValidationMeta::from(&self.wl_storage); + let tx_results = txs + .iter() .map(|tx_bytes| { - self.process_single_tx(tx_bytes, &mut tx_queue_iter) + self.process_single_tx( + tx_bytes, + &mut tx_queue_iter, + &mut metadata, + ) }) - .collect() + .collect(); + metadata.decrypted_queue_has_remaining_txs = + !self.wl_storage.storage.tx_queue.is_empty() + && tx_queue_iter.next().is_some(); + (tx_results, metadata) } /// Checks if the Tx can be deserialized from bytes. Checks the fees and @@ -65,6 +160,8 @@ where /// 3: Wasm runtime error /// 4: Invalid order of decrypted txs /// 5. More decrypted txs than expected + /// 6. A transaction could not be decrypted + /// 7. Not enough block space was available for some tx /// /// INVARIANT: Any changes applied in this method must be reverted if the /// proposal is rejected (unless we can simply overwrite them in the @@ -73,41 +170,74 @@ where &self, tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, + metadata: &mut ValidationMeta, ) -> TxResult { - let tx = match Tx::try_from(tx_bytes) { - Ok(tx) => tx, - Err(_) => { - return TxResult { + // try to allocate space for this tx + if let Err(e) = metadata.txs_bin.try_dump(tx_bytes) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: match e { + AllocFailure::Rejected { .. } => { + "No more space left in the block" + } + AllocFailure::OverflowsBin { .. 
} => { + "The given tx is larger than the max configured \ + proposal size" + } + } + .into(), + }; + } + + let maybe_tx = Tx::try_from(tx_bytes).map_or_else( + |err| { + tracing::debug!( + ?err, + "Couldn't deserialize transaction received during \ + PrepareProposal" + ); + Err(TxResult { code: ErrorCodes::InvalidTx.into(), info: "The submitted transaction was not deserializable" .into(), - }; - } + }) + }, + |tx| { + process_tx(tx).map_err(|err| { + // This occurs if the wrapper / protocol tx signature is + // invalid + TxResult { + code: ErrorCodes::InvalidSig.into(), + info: err.to_string(), + } + }) + }, + ); + let tx = match maybe_tx { + Ok(tx) => tx, + Err(tx_result) => return tx_result, }; + // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - match process_tx(tx) { - // This occurs if the wrapper / protocol tx signature is invalid - Err(err) => TxResult { - code: ErrorCodes::InvalidSig.into(), - info: err.to_string(), + match tx { + // If it is a raw transaction, we do no further validation + TxType::Raw(_) => TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Transaction rejected: Non-encrypted transactions are \ + not supported" + .into(), }, - Ok(result) => match result { - // If it is a raw transaction, we do no further validation - TxType::Raw(_) => TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "Transaction rejected: Non-encrypted transactions \ - are not supported" - .into(), - }, - TxType::Protocol(_) => TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "Protocol transactions are a fun new feature that \ - is coming soon to a blockchain near you. Patience." - .into(), - }, - TxType::Decrypted(tx) => match tx_queue_iter.next() { + TxType::Protocol(_) => TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Protocol transactions are a fun new feature that is \ + coming soon to a blockchain near you. Patience." + .into(), + }, + TxType::Decrypted(tx) => { + metadata.has_decrypted_txs = true; + match tx_queue_iter.next() { Some(WrapperTxInQueue { tx: wrapper, #[cfg(not(feature = "mainnet"))] @@ -142,59 +272,89 @@ where info: "Received more decrypted txs than expected" .into(), }, - }, - TxType::Wrapper(tx) => { - // validate the ciphertext via Ferveo - if !tx.validate_ciphertext() { + } + } + TxType::Wrapper(tx) => { + // decrypted txs shouldn't show up before wrapper txs + if metadata.has_decrypted_txs { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Decrypted txs should not be proposed before \ + wrapper txs" + .into(), + }; + } + // try to allocate space for this encrypted tx + if let Err(e) = metadata.encrypted_txs_bin.try_dump(tx_bytes) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: match e { + AllocFailure::Rejected { .. } => { + "No more space left in the block for wrapper \ + txs" + } + AllocFailure::OverflowsBin { .. 
} => { + "The given wrapper tx is larger than 1/3 of \ + the available block space" + } + } + .into(), + }; + } + if hints::unlikely(self.encrypted_txs_not_allowed()) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: "Wrapper txs not allowed at the current block \ + height" + .into(), + }; + } + + // validate the ciphertext via Ferveo + if !tx.validate_ciphertext() { + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!( + "The ciphertext of the wrapped tx {} is invalid", + hash_tx(tx_bytes) + ), + } + } else { + // If the public key corresponds to the MASP sentinel + // transaction key, then the fee payer is effectively + // the MASP, otherwise derive + // they payer from public key. + let fee_payer = if tx.pk != masp_tx_key().ref_to() { + tx.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = self.get_balance(&tx.fee.token, &fee_payer); + + // In testnets, tx is allowed to skip fees if it + // includes a valid PoW + #[cfg(not(feature = "mainnet"))] + let has_valid_pow = self.has_valid_pow_solution(&tx); + #[cfg(feature = "mainnet")] + let has_valid_pow = false; + + if has_valid_pow || self.get_wrapper_tx_fees() <= balance { TxResult { - code: ErrorCodes::InvalidTx.into(), - info: format!( - "The ciphertext of the wrapped tx {} is \ - invalid", - hash_tx(tx_bytes) - ), + code: ErrorCodes::Ok.into(), + info: "Process proposal accepted this transaction" + .into(), } } else { - // If the public key corresponds to the MASP sentinel - // transaction key, then the fee payer is effectively - // the MASP, otherwise derive - // they payer from public key. - let fee_payer = if tx.pk != masp_tx_key().ref_to() { - tx.fee_payer() - } else { - masp() - }; - // check that the fee payer has sufficient balance - let balance = - self.get_balance(&tx.fee.token, &fee_payer); - - // In testnets, tx is allowed to skip fees if it - // includes a valid PoW - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&tx); - #[cfg(feature = "mainnet")] - let has_valid_pow = false; - - if has_valid_pow - || self.get_wrapper_tx_fees() <= balance - { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process proposal accepted this \ - transaction" - .into(), - } - } else { - TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "The address given does not have \ - sufficient balance to pay fee" - .into(), - } + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "The address given does not have sufficient \ + balance to pay fee" + .into(), } } } - }, + } } } @@ -204,6 +364,15 @@ where ) -> shim::response::RevertProposal { Default::default() } + + /// Checks if it is not possible to include encrypted txs at the current + /// block height. + fn encrypted_txs_not_allowed(&self) -> bool { + let pos_queries = self.wl_storage.pos_queries(); + let is_2nd_height_off = pos_queries.is_deciding_offset_within_epoch(1); + let is_3rd_height_off = pos_queries.is_deciding_offset_within_epoch(2); + is_2nd_height_off || is_3rd_height_off + } } /// We test the failure cases of [`process_proposal`]. 
The happy flows @@ -211,26 +380,25 @@ where #[cfg(test)] mod test_process_proposal { use borsh::BorshDeserialize; + use namada::ledger::parameters::storage::get_wrapper_tx_fees_key; use namada::proto::SignedTxData; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::Epoch; use namada::types::token::Amount; use namada::types::transaction::encrypted::EncryptedTx; - use namada::types::transaction::{EncryptionKey, Fee, WrapperTx}; + use namada::types::transaction::{EncryptionKey, Fee, WrapperTx, MIN_FEE}; use super::*; - use crate::facade::tendermint_proto::abci::RequestInitChain; - use crate::facade::tendermint_proto::google::protobuf::Timestamp; use crate::node::ledger::shell::test_utils::{ - gen_keypair, ProcessProposal, TestError, TestShell, + self, gen_keypair, ProcessProposal, TestError, }; /// Test that if a wrapper tx is not signed, it is rejected /// by [`process_proposal`]. #[test] fn test_unsigned_wrapper_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -278,7 +446,7 @@ mod test_process_proposal { /// Test that a wrapper tx with invalid signature is rejected #[test] fn test_wrapper_bad_signature_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -363,8 +531,16 @@ mod test_process_proposal { /// non-zero, [`process_proposal`] rejects that tx #[test] fn test_wrapper_unknown_address() { - let (mut shell, _) = TestShell::new(); - let keypair = crate::wallet::defaults::keys().remove(0).1; + let (mut shell, _) = test_utils::setup(); + shell + .wl_storage + .storage + .write( + &get_wrapper_tx_fees_key(), + token::Amount::whole(MIN_FEE).try_to_vec().unwrap(), + ) + .unwrap(); + let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), @@ -384,17 +560,19 @@ mod test_process_proposal { ) .sign(&keypair) .expect("Test failed"); - let request = ProcessProposal { - txs: vec![wrapper.to_bytes()], - }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + let response = { + let request = ProcessProposal { + txs: vec![wrapper.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); assert_eq!( @@ -409,7 +587,7 @@ mod test_process_proposal { /// [`process_proposal`] rejects that tx #[test] fn test_wrapper_insufficient_balance_address() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); // reduce address balance to match the 100 token fee let balance_key = token::balance_key( @@ -421,6 +599,14 @@ mod test_process_proposal { .storage .write(&balance_key, Amount::whole(99).try_to_vec().unwrap()) .unwrap(); + shell + .wl_storage + .storage + .write( + &get_wrapper_tx_fees_key(), + token::Amount::whole(MIN_FEE).try_to_vec().unwrap(), + ) + .unwrap(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -468,7 +654,7 @@ mod test_process_proposal { /// validated, [`process_proposal`] rejects it #[test] fn test_decrypted_txs_out_of_order() { - let (mut shell, _) 
= TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = gen_keypair(); let mut txs = vec![]; for i in 0..3 { @@ -496,38 +682,26 @@ mod test_process_proposal { has_valid_pow: false, }))); } - let req_1 = ProcessProposal { - txs: vec![txs[0].to_bytes()], - }; - let response_1 = if let [resp] = shell - .process_proposal(req_1) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response_1.result.code, u32::from(ErrorCodes::Ok)); - - let req_2 = ProcessProposal { - txs: vec![txs[2].to_bytes()], - }; - - let response_2 = if let Err(TestError::RejectProposal(resp)) = - shell.process_proposal(req_2) - { - if let [resp] = resp.as_slice() { - resp.clone() + let response = { + let request = ProcessProposal { + txs: vec![ + txs[0].to_bytes(), + txs[2].to_bytes(), + txs[1].to_bytes(), + ], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 3); + resp.remove(1) } else { panic!("Test failed") } - } else { - panic!("Test failed") }; - assert_eq!(response_2.result.code, u32::from(ErrorCodes::InvalidOrder)); + assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidOrder)); assert_eq!( - response_2.result.info, + response.result.info, String::from( "Process proposal rejected a decrypted transaction that \ violated the tx order determined in the previous block" @@ -539,7 +713,7 @@ mod test_process_proposal { /// is rejected by [`process_proposal`] #[test] fn test_incorrectly_labelled_as_undecryptable() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let keypair = gen_keypair(); let tx = Tx::new( @@ -592,15 +766,7 @@ mod test_process_proposal { /// undecryptable but still accepted #[test] fn test_invalid_hash_commitment() { - let (mut shell, _) = TestShell::new(); - shell.init_chain(RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }); + let (mut shell, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); let tx = Tx::new( @@ -648,15 +814,7 @@ mod test_process_proposal { /// marked undecryptable and the errors handled correctly #[test] fn test_undecryptable() { - let (mut shell, _) = TestShell::new(); - shell.init_chain(RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }); + let (mut shell, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); let pubkey = EncryptionKey::default(); // not valid tx bytes @@ -700,7 +858,7 @@ mod test_process_proposal { /// [`process_proposal`] than expected, they are rejected #[test] fn test_too_many_decrypted_txs() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -737,7 +895,7 @@ mod test_process_proposal { /// Process Proposal should reject a RawTx, but not panic #[test] fn test_raw_tx_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 74a56a4ddc..970607f655 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -103,7 +103,7 @@ impl AbcippShim { #[cfg(feature = "abcipp")] 
Req::FinalizeBlock(block) => { let unprocessed_txs = block.txs.clone(); - let processing_results = + let (processing_results, _) = self.service.process_txs(&block.txs); let mut txs = Vec::with_capacity(unprocessed_txs.len()); for (result, tx) in processing_results @@ -137,7 +137,7 @@ impl AbcippShim { } #[cfg(not(feature = "abcipp"))] Req::EndBlock(_) => { - let processing_results = + let (processing_results, _) = self.service.process_txs(&self.delivered_txs); let mut txs = Vec::with_capacity(self.delivered_txs.len()); let mut delivered = vec![]; diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index cb3145f0e2..d4f4e07d02 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -129,7 +129,7 @@ pub mod shim { InitChain(ResponseInitChain), Info(ResponseInfo), Query(ResponseQuery), - PrepareProposal(ResponsePrepareProposal), + PrepareProposal(response::PrepareProposal), VerifyHeader(response::VerifyHeader), ProcessProposal(response::ProcessProposal), RevertProposal(response::RevertProposal), @@ -176,7 +176,7 @@ pub mod shim { Ok(Resp::ApplySnapshotChunk(inner)) } Response::PrepareProposal(inner) => { - Ok(Resp::PrepareProposal(inner)) + Ok(Resp::PrepareProposal(inner.into())) } #[cfg(feature = "abcipp")] Response::ExtendVote(inner) => Ok(Resp::ExtendVote(inner)), @@ -282,6 +282,26 @@ pub mod shim { types::ConsensusParams, }; + #[derive(Debug, Default)] + pub struct PrepareProposal { + pub txs: Vec, + } + + #[cfg(feature = "abcipp")] + impl From for super::ResponsePrepareProposal { + fn from(_: PrepareProposal) -> Self { + // TODO(namada#198): When abci++ arrives, we should return a + // real response. + Self::default() + } + } + + #[cfg(not(feature = "abcipp"))] + impl From for super::ResponsePrepareProposal { + fn from(resp: PrepareProposal) -> Self { + Self { txs: resp.txs } + } + } #[derive(Debug, Default)] pub struct VerifyHeader; diff --git a/core/src/hints.rs b/core/src/hints.rs new file mode 100644 index 0000000000..78d49eeab5 --- /dev/null +++ b/core/src/hints.rs @@ -0,0 +1,44 @@ +//! Compiler hints, to improve the performance of certain operations. + +/// A function that is seldom called. +#[inline] +#[cold] +pub fn cold() {} + +/// A likely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if likely(frequent_condition()) { +/// // most common path to take +/// } else { +/// // ... +/// } +/// ``` +#[inline] +pub fn likely(b: bool) -> bool { + if !b { + cold() + } + b +} + +/// An unlikely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if unlikely(rare_condition()) { +/// // ... 
+/// } else { +/// // most common path to take +/// } +/// ``` +#[inline] +pub fn unlikely(b: bool) -> bool { + if b { + cold() + } + b +} diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index e2ac4da235..d81331af01 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -12,6 +12,7 @@ pub mod write_log; use core::fmt::Debug; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use merkle_tree::StorageBytes; pub use merkle_tree::{ MembershipProof, MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, diff --git a/core/src/lib.rs b/core/src/lib.rs index c9bd40084e..44ca420409 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -7,6 +7,7 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod bytes; +pub mod hints; pub mod ledger; pub mod proto; pub mod types; diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index 5f46105480..255bd69e7f 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -1106,6 +1106,13 @@ impl Epochs { } None } + + /// Return all starting block heights for each successive Epoch. + /// + /// __INVARIANT:__ The returned values are sorted in ascending order. + pub fn first_block_heights(&self) -> &[BlockHeight] { + &self.first_block_heights + } } /// A value of a storage prefix iterator. diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg new file mode 100644 index 0000000000..f9d7209da1 --- /dev/null +++ b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg @@ -0,0 +1,4 @@ + + + +
[Figure: block-space-allocator-bins.svg: transactions from the set M of mempool transactions are offered to the BlockSpaceAllocator's DECRYPTED, PROTOCOL and ENCRYPTED bins via bin.try_dump(tx); the transactions that fit form the set P of proposed transactions.]
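The bins diagram above is the mental model for proposal construction: every transaction in the mempool set M is offered to the appropriate bin via bin.try_dump(tx), and only the transactions that fit become part of the proposed set P. The following self-contained sketch illustrates that filtering step; the Bin type, its byte-based accounting and the build_proposal helper are illustrative stand-ins, not the crate's actual API.

// Illustrative stand-in for a block-space bin; not the crate's actual API.
struct Bin {
    allotted: u64, // bytes this bin may use
    occupied: u64, // bytes already claimed
}

impl Bin {
    // Try to fit `tx` into the bin, reporting the remaining free space on failure.
    fn try_dump(&mut self, tx: &[u8]) -> Result<(), u64> {
        let size = tx.len() as u64;
        if self.occupied + size > self.allotted {
            return Err(self.allotted - self.occupied);
        }
        self.occupied += size;
        Ok(())
    }
}

// Build the proposed set P from the mempool set M, keeping only txs that fit.
fn build_proposal(bin: &mut Bin, mempool: Vec<Vec<u8>>) -> Vec<Vec<u8>> {
    mempool
        .into_iter()
        .filter(|tx| bin.try_dump(tx).is_ok())
        .collect()
}

fn main() {
    let mut bin = Bin { allotted: 10, occupied: 0 };
    let mempool = vec![vec![0u8; 4], vec![0u8; 4], vec![0u8; 4]];
    let proposed = build_proposal(&mut bin, mempool);
    assert_eq!(proposed.len(), 2); // the third 4-byte tx no longer fits
}

Transactions that do not fit are simply left in the mempool for a later block, which mirrors the AllocFailure::Rejected status used by the real allocator.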
diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg new file mode 100644 index 0000000000..b19ad90ce7 --- /dev/null +++ b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg @@ -0,0 +1,4 @@ + + + +
[Figure: block-space-allocator-example.svg: example block space layout at heights H through H+4, showing the mix of decrypted (D), protocol (P) and encrypted (E) transactions proposed in each block.]
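The height-by-height example above reflects a single sizing relation: the decrypted transactions proposed at some height never need more room than the encrypted transactions admitted at the previous height, since those are precisely the transactions being decrypted. The toy calculation below sketches per-height bin sizes under the allocation policy described earlier in this diff (encrypted txs capped at a third of the block, protocol txs taking half of the remainder); bin_sizes is a hypothetical helper for illustration, not part of the codebase.

// Toy per-height bin sizing. `max` is the Tendermint block size in bytes and
// `encrypted_at_prev` is the space used by the previous height's encrypted txs.
// Hypothetical helper, for illustration only.
fn bin_sizes(max: u64, encrypted_at_prev: u64) -> (u64, u64, u64) {
    let encrypted = max / 3; // encrypted txs are capped at a third of the block
    // decrypted txs mirror the previous height's encrypted txs, so they are
    // bounded by the same cap
    let decrypted = encrypted_at_prev.min(encrypted);
    let protocol = (max - encrypted - decrypted) / 2; // half of what remains
    (encrypted, decrypted, protocol)
}

fn main() {
    // Height H: the proposer fills the whole encrypted bin.
    let (enc_h, _, _) = bin_sizes(90, 0);
    assert_eq!(enc_h, 30);
    // Height H + 1: the decrypted bin must be able to hold everything
    // that was encrypted at height H.
    let (_, dec_h1, proto_h1) = bin_sizes(90, enc_h);
    assert_eq!(dec_h1, 30);
    assert_eq!(proto_h1, 15); // (90 - 30 - 30) / 2
}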
\ No newline at end of file diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 869708655a..0511ce1350 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -15,6 +15,7 @@ pub mod btree_set; pub mod epoched; pub mod parameters; +pub mod pos_queries; pub mod storage; pub mod types; // pub mod validation; diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs new file mode 100644 index 0000000000..bceabd3157 --- /dev/null +++ b/proof_of_stake/src/pos_queries.rs @@ -0,0 +1,314 @@ +//! Storage API for querying data about Proof-of-stake related +//! data. This includes validator and epoch related data. +use borsh::BorshDeserialize; +use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key; +use namada_core::ledger::parameters::EpochDuration; +use namada_core::ledger::storage::WlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey; +use namada_core::ledger::{storage, storage_api}; +use namada_core::tendermint_proto::google::protobuf; +use namada_core::tendermint_proto::types::EvidenceParams; +use namada_core::types::address::Address; +use namada_core::types::chain::ProposalBytes; +use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::{key, token}; +use thiserror::Error; + +use crate::types::{ConsensusValidatorSet, WeightedValidator}; +use crate::{consensus_validator_set_handle, PosParams}; + +/// Errors returned by [`PosQueries`] operations. +#[derive(Error, Debug)] +pub enum Error { + /// The given address is not among the set of consensus validators for + /// the corresponding epoch. + #[error( + "The address '{0:?}' is not among the consensus validator set for \ + epoch {1}" + )] + NotValidatorAddress(Address, Epoch), + /// The given public key does not correspond to any consensus validator's + /// key at the provided epoch. + #[error( + "The public key '{0}' is not among the consensus validator set for \ + epoch {1}" + )] + NotValidatorKey(String, Epoch), + /// The given public key hash does not correspond to any consensus + /// validator's key at the provided epoch. + #[error( + "The public key hash '{0}' is not among the consensus validator set \ + for epoch {1}" + )] + NotValidatorKeyHash(String, Epoch), + /// An invalid Tendermint validator address was detected. + #[error("Invalid validator tendermint address")] + InvalidTMAddress, +} + +/// Result type returned by [`PosQueries`] operations. +pub type Result = ::std::result::Result; + +/// Methods used to query blockchain proof-of-stake related state, +/// such as the current set of consensus validators. +pub trait PosQueries { + /// The underlying storage type. + type Storage; + + /// Return a handle to [`PosQueries`]. + fn pos_queries(&self) -> PosQueriesHook<'_, Self::Storage>; +} + +impl PosQueries for WlStorage +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + type Storage = Self; + + #[inline] + fn pos_queries(&self) -> PosQueriesHook<'_, Self> { + PosQueriesHook { wl_storage: self } + } +} + +/// A handle to [`PosQueries`]. +/// +/// This type is a wrapper around a pointer to a +/// [`WlStorage`]. 
+#[derive(Debug)] +#[repr(transparent)] +pub struct PosQueriesHook<'db, DB> { + wl_storage: &'db DB, +} + +impl<'db, DB> Clone for PosQueriesHook<'db, DB> { + fn clone(&self) -> Self { + Self { + wl_storage: self.wl_storage, + } + } +} + +impl<'db, DB> Copy for PosQueriesHook<'db, DB> {} + +impl<'db, D, H> PosQueriesHook<'db, WlStorage<D, H>> +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + /// Return a handle to the inner [`WlStorage`]. + #[inline] + pub fn storage(self) -> &'db WlStorage<D, H> { + self.wl_storage + } + + /// Get the set of consensus validators for a given epoch (defaulting to the + /// epoch of the current yet-to-be-committed block). + #[inline] + pub fn get_consensus_validators( + self, + epoch: Option<Epoch>, + ) -> ConsensusValidators<'db, D, H> { + let epoch = epoch + .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + ConsensusValidators { + wl_storage: self.wl_storage, + validator_set: consensus_validator_set_handle().at(&epoch), + } + } + + /// Look up the total voting power for an epoch (defaulting to the + /// epoch of the current yet-to-be-committed block). + pub fn get_total_voting_power(self, epoch: Option<Epoch>) -> token::Amount { + self.get_consensus_validators(epoch) + .iter() + .map(|validator| u64::from(validator.bonded_stake)) + .sum::<u64>() + .into() + } + + /// Simple helper function for the ledger to get balances + /// of the specified token at the specified address. + pub fn get_balance( + self, + token: &Address, + owner: &Address, + ) -> token::Amount { + storage_api::token::read_balance(self.wl_storage, token, owner) + .expect("Storage read in the protocol must not fail") + } + + /// Return evidence parameters. + // TODO: improve this docstring + pub fn get_evidence_params( + self, + epoch_duration: &EpochDuration, + pos_params: &PosParams, + ) -> EvidenceParams { + // Minimum number of epochs before tokens are unbonded and can be + // withdrawn + let len_before_unbonded = + std::cmp::max(pos_params.unbonding_len as i64 - 1, 0); + let max_age_num_blocks: i64 = + epoch_duration.min_num_of_blocks as i64 * len_before_unbonded; + let min_duration_secs = epoch_duration.min_duration.0 as i64; + let max_age_duration = Some(protobuf::Duration { + seconds: min_duration_secs * len_before_unbonded, + nanos: 0, + }); + EvidenceParams { + max_age_num_blocks, + max_age_duration, + ..EvidenceParams::default() + } + } + + /// Look up data about a validator from their address. + pub fn get_validator_from_address( + self, + address: &Address, + epoch: Option<Epoch>, + ) -> Result<(token::Amount, key::common::PublicKey)> { + let epoch = epoch + .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + self.get_consensus_validators(Some(epoch)) + .iter() + .find(|validator| address == &validator.address) + .map(|validator| { + let protocol_pk_key = key::protocol_pk_key(&validator.address); + // TODO: rewrite this, to use `StorageRead::read` + let bytes = self + .wl_storage + .storage + .read(&protocol_pk_key) + .expect("Validator should have public protocol key") + .0 + .expect("Validator should have public protocol key"); + let protocol_pk: key::common::PublicKey = + BorshDeserialize::deserialize(&mut bytes.as_ref()).expect( + "Protocol public key in storage should be \ + deserializable", + ); + (validator.bonded_stake, protocol_pk) + }) + .ok_or_else(|| Error::NotValidatorAddress(address.clone(), epoch)) + } + + /// Given a Tendermint validator, the address is the hash + /// of the validator's public key.
We look up the native + /// address from storage using this hash. + // TODO: We may change how this lookup is done, see + // https://github.com/anoma/namada/issues/200 + pub fn get_validator_from_tm_address( + self, + _tm_address: &[u8], + _epoch: Option<Epoch>, + ) -> Result<Address>
{ + // let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0); + // let validator_raw_hash = core::str::from_utf8(tm_address) + // .map_err(|_| Error::InvalidTMAddress)?; + // self.read_validator_address_raw_hash(validator_raw_hash) + // .ok_or_else(|| { + // Error::NotValidatorKeyHash( + // validator_raw_hash.to_string(), + // epoch, + // ) + // }) + todo!() + } + + /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, + /// within the current [`Epoch`]. + pub fn is_deciding_offset_within_epoch(self, height_offset: u64) -> bool { + let current_decision_height = self.get_current_decision_height(); + + // NOTE: the first stored height in `fst_block_heights_of_each_epoch` + // is 0, because of a bug (should be 1), so this code needs to + // handle that case + // + // we can remove this check once that's fixed + if self.wl_storage.storage.get_current_epoch().0 == Epoch(0) { + let height_offset_within_epoch = BlockHeight(1 + height_offset); + return current_decision_height == height_offset_within_epoch; + } + + let fst_heights_of_each_epoch = self + .wl_storage + .storage + .block + .pred_epochs + .first_block_heights(); + + fst_heights_of_each_epoch + .last() + .map(|&h| { + let height_offset_within_epoch = h + height_offset; + current_decision_height == height_offset_within_epoch + }) + .unwrap_or(false) + } + + #[inline] + /// Given some [`BlockHeight`], return the corresponding [`Epoch`]. + pub fn get_epoch(self, height: BlockHeight) -> Option<Epoch> { + self.wl_storage.storage.block.pred_epochs.get_epoch(height) + } + + #[inline] + /// Retrieve the [`BlockHeight`] that is currently being decided. + pub fn get_current_decision_height(self) -> BlockHeight { + self.wl_storage.storage.last_height + 1 + } + + /// Retrieve the `max_proposal_bytes` consensus parameter from storage. + pub fn get_max_proposal_bytes(self) -> ProposalBytes { + storage_api::StorageRead::read( + self.wl_storage, + &get_max_proposal_bytes_key(), + ) + .expect("Must be able to read ProposalBytes from storage") + .expect("ProposalBytes must be present in storage") + } +} + +/// A handle to the set of consensus validators in Namada, +/// at some given epoch. +pub struct ConsensusValidators<'db, D, H> +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + wl_storage: &'db WlStorage<D, H>, + validator_set: ConsensusValidatorSet, +} + +impl<'db, D, H> ConsensusValidators<'db, D, H> +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + /// Iterate over the set of consensus validators in Namada, at some given + /// epoch. + pub fn iter<'this: 'db>( + &'this self, + ) -> impl Iterator<Item = WeightedValidator> + 'db { + self.validator_set + .iter(self.wl_storage) + .expect("Must be able to iterate over consensus validators") + .map(|res| { + let ( + NestedSubKey::Data { + key: bonded_stake, ..
+ }, + address, + ) = res.expect( + "We should be able to decode validators in storage", + ); + WeightedValidator { + address, + bonded_stake, + } + }) + } +} diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 9437103546..d739a627a5 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -1787,7 +1787,7 @@ fn pos_bonds() -> Result<()> { let test = setup::network( |genesis| { let parameters = ParametersConfig { - min_num_of_blocks: 2, + min_num_of_blocks: 6, max_expected_time_per_block: 1, epochs_per_year: 31_536_000, ..genesis.parameters @@ -1920,7 +1920,7 @@ fn pos_bonds() -> Result<()> { epoch, delegation_withdrawable_epoch ); let start = Instant::now(); - let loop_timeout = Duration::new(20, 0); + let loop_timeout = Duration::new(60, 0); loop { if Instant::now().duration_since(start) > loop_timeout { panic!( @@ -1991,7 +1991,7 @@ fn pos_init_validator() -> Result<()> { let test = setup::network( |genesis| { let parameters = ParametersConfig { - min_num_of_blocks: 2, + min_num_of_blocks: 4, epochs_per_year: 31_536_000, max_expected_time_per_block: 1, ..genesis.parameters @@ -2263,7 +2263,7 @@ fn proposal_submission() -> Result<()> { let parameters = ParametersConfig { epochs_per_year: epochs_per_year_from_min_duration(1), max_proposal_bytes: Default::default(), - min_num_of_blocks: 1, + min_num_of_blocks: 4, max_expected_time_per_block: 1, vp_whitelist: Some(get_all_wasms_hashes( &working_dir,