diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8c999bb76fc14..660ba7ab86229 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1832,8 +1832,12 @@ impl_runtime_apis! { fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { - Mmr::generate_proof(leaf_index) - .map(|(leaf, proof)| (mmr::EncodableOpaqueLeaf::from_leaf(&leaf), proof)) + Mmr::generate_batch_proof(vec![leaf_index]).and_then(|(leaves, proof)| + Ok(( + mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), + mmr::BatchProof::into_single_leaf_proof(proof)? + )) + ) } fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof) @@ -1843,7 +1847,7 @@ impl_runtime_apis! { .into_opaque_leaf() .try_decode() .ok_or(mmr::Error::Verify)?; - Mmr::verify_leaf(leaf, proof) + Mmr::verify_leaves(vec![leaf], mmr::Proof::into_batch_proof(proof)) } fn verify_proof_stateless( @@ -1852,12 +1856,38 @@ impl_runtime_apis! { proof: mmr::Proof ) -> Result<(), mmr::Error> { let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf()); - pallet_mmr::verify_leaf_proof::(root, node, proof) + pallet_mmr::verify_leaves_proof::(root, vec![node], mmr::Proof::into_batch_proof(proof)) } fn mmr_root() -> Result { Ok(Mmr::mmr_root()) } + + fn generate_batch_proof(leaf_indices: Vec) + -> Result<(Vec, mmr::BatchProof), mmr::Error> + { + Mmr::generate_batch_proof(leaf_indices) + .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) + } + + fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) + -> Result<(), mmr::Error> + { + let leaves = leaves.into_iter().map(|leaf| + leaf.into_opaque_leaf() + .try_decode() + .ok_or(mmr::Error::Verify)).collect::, mmr::Error>>()?; + Mmr::verify_leaves(leaves, proof) + } + + fn verify_batch_proof_stateless( + root: mmr::Hash, + leaves: Vec, + proof: mmr::BatchProof + ) -> Result<(), mmr::Error> { + let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); + pallet_mmr::verify_leaves_proof::(root, nodes, proof) + } } impl sp_session::SessionKeys for Runtime { diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index a5a15bac5f8f9..fecb9557df6ea 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -73,7 +73,6 @@ pub(crate) struct Rounds { best_done: Option>, session_start: NumberFor, validator_set: ValidatorSet, - prev_validator_set: ValidatorSet, } impl Rounds @@ -81,18 +80,8 @@ where P: Ord + Hash + Clone, B: Block, { - pub(crate) fn new( - session_start: NumberFor, - validator_set: ValidatorSet, - prev_validator_set: ValidatorSet, - ) -> Self { - Rounds { - rounds: BTreeMap::new(), - best_done: None, - session_start, - validator_set, - prev_validator_set, - } + pub(crate) fn new(session_start: NumberFor, validator_set: ValidatorSet) -> Self { + Rounds { rounds: BTreeMap::new(), best_done: None, session_start, validator_set } } } @@ -101,24 +90,12 @@ where P: Ord + Hash + Clone, B: Block, { - pub(crate) fn validator_set_id_for(&self, block_number: NumberFor) -> ValidatorSetId { - if block_number > self.session_start { - self.validator_set.id() - } else { - self.prev_validator_set.id() - } - } - - pub(crate) fn validators_for(&self, block_number: NumberFor) -> &[Public] { - if block_number > self.session_start { - self.validator_set.validators() - } else { - self.prev_validator_set.validators() - } + pub(crate) fn validator_set_id(&self) -> ValidatorSetId { + 
self.validator_set.id() } - pub(crate) fn validator_set(&self) -> &ValidatorSet { - &self.validator_set + pub(crate) fn validators(&self) -> &[Public] { + self.validator_set.validators() } pub(crate) fn session_start(&self) -> &NumberFor { @@ -143,7 +120,7 @@ where round.1 ); false - } else if !self.validator_set.validators().iter().any(|id| vote.0 == *id) { + } else if !self.validators().iter().any(|id| vote.0 == *id) { debug!( target: "beefy", "🥩 received vote {:?} from validator that is not in the validator set, ignoring", @@ -170,12 +147,11 @@ where // remove this and older (now stale) rounds let signatures = self.rounds.remove(round)?.votes; self.rounds.retain(|&(_, number), _| number > round.1); - self.best_done = self.best_done.clone().max(Some(round.1.clone())); + self.best_done = self.best_done.max(Some(round.1)); debug!(target: "beefy", "🥩 Concluded round #{}", round.1); Some( - self.validator_set - .validators() + self.validators() .iter() .map(|authority_id| signatures.get(authority_id).cloned()) .collect(), @@ -247,13 +223,13 @@ mod tests { .unwrap(); let session_start = 1u64.into(); - let rounds = Rounds::::new(session_start, validators.clone(), validators); + let rounds = Rounds::::new(session_start, validators); - assert_eq!(42, rounds.validator_set_id_for(session_start)); + assert_eq!(42, rounds.validator_set_id()); assert_eq!(1, *rounds.session_start()); assert_eq!( &vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], - rounds.validators_for(session_start) + rounds.validators() ); } @@ -274,7 +250,7 @@ mod tests { let round = (H256::from_low_u64_le(1), 1); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators.clone(), validators); + let mut rounds = Rounds::::new(session_start, validators); // no self vote yet, should self vote assert!(rounds.should_self_vote(&round)); @@ -347,7 +323,7 @@ mod tests { .unwrap(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators.clone(), validators); + let mut rounds = Rounds::::new(session_start, validators); // round 1 assert!(rounds.add_vote( diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index e568daba8e112..1d035a6a447c2 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -39,7 +39,9 @@ use beefy_primitives::{ crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; -use sp_mmr_primitives::{EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof}; +use sp_mmr_primitives::{ + BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, +}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; @@ -259,6 +261,22 @@ macro_rules! 
create_test_api { fn mmr_root() -> Result { Ok($mmr_root) } + + fn generate_batch_proof(_leaf_indices: Vec) -> Result<(Vec, BatchProof), MmrError> { + unimplemented!() + } + + fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { + unimplemented!() + } + + fn verify_batch_proof_stateless( + _root: MmrRootHash, + _leaves: Vec, + _proof: BatchProof + ) -> Result<(), MmrError> { + unimplemented!() + } } } } @@ -469,8 +487,8 @@ fn finalize_block_and_wait_for_beefy( } if expected_beefy.is_empty() { - // run for 1 second then verify no new best beefy block available - let timeout = Some(Duration::from_millis(500)); + // run for quarter second then verify no new best beefy block available + let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, runtime, timeout); streams_empty_after_timeout(signed_commitments, &net, runtime, None); } else { @@ -535,8 +553,8 @@ fn lagging_validators() { let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - // push 42 blocks including `AuthorityChange` digests every 30 blocks. - net.generate_blocks(42, session_len, &validator_set, true); + // push 62 blocks including `AuthorityChange` digests every 30 blocks. + net.generate_blocks(62, session_len, &validator_set, true); net.block_until_sync(); let net = Arc::new(Mutex::new(net)); @@ -550,7 +568,7 @@ fn lagging_validators() { let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY - let timeout = Some(Duration::from_millis(500)); + let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); @@ -563,6 +581,26 @@ fn lagging_validators() { // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); + + // Verify that session-boundary votes get buffered by client and only processed once + // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session + // validator set). 
+ + // Alice finalizes session-boundary mandatory block #60, Bob lags behind + let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let finalize = BlockId::number(60); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + // verify nothing gets finalized by BEEFY + let timeout = Some(Duration::from_millis(250)); + streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); + streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + + // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) + let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 + wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); + wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[60]); } #[test] @@ -624,7 +662,7 @@ fn correct_beefy_payload() { .unwrap(); // verify consensus is _not_ reached - let timeout = Some(Duration::from_millis(500)); + let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 8ab18c58f9dd3..ae466a71abb57 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -16,7 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{collections::BTreeSet, fmt::Debug, marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::Debug, + marker::PhantomData, + sync::Arc, + time::Duration, +}; use codec::{Codec, Decode, Encode}; use futures::{future, FutureExt, StreamExt}; @@ -27,7 +33,7 @@ use sc_client_api::{Backend, FinalityNotification, FinalityNotifications}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; -use sp_arithmetic::traits::AtLeast32Bit; +use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_mmr_primitives::MmrApi; use sp_runtime::{ @@ -80,6 +86,8 @@ pub(crate) struct BeefyWorker { min_block_delta: u32, metrics: Option, rounds: Option>, + /// Buffer holding votes for blocks that the client hasn't seen finality for. + pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, finality_notifications: FinalityNotifications, /// Best block we received a GRANDPA notification for best_grandpa_block_header: ::Header, @@ -141,6 +149,7 @@ where min_block_delta: min_block_delta.max(1), metrics, rounds: None, + pending_votes: BTreeMap::new(), finality_notifications: client.finality_notification_stream(), best_grandpa_block_header: last_finalized_header, best_beefy_block: None, @@ -238,7 +247,11 @@ where } /// Handle session changes by starting new voting round for mandatory blocks. 
- fn init_session_at(&mut self, active: ValidatorSet, session_start: NumberFor) { + fn init_session_at( + &mut self, + active: ValidatorSet, + new_session_start: NumberFor, + ) { debug!(target: "beefy", "🥩 New active validator set: {:?}", active); metric_set!(self, beefy_validator_set_id, active.id()); // BEEFY should produce a signed commitment for each session @@ -246,23 +259,22 @@ where active.id() != GENESIS_AUTHORITY_SET_ID && self.last_signed_id != 0 { + debug!( + target: "beefy", "🥩 Detected skipped session: active-id {:?}, last-signed-id {:?}", + active.id(), + self.last_signed_id, + ); metric_inc!(self, beefy_skipped_sessions); } if log_enabled!(target: "beefy", log::Level::Debug) { // verify the new validator set - only do it if we're also logging the warning - let _ = self.verify_validator_set(&session_start, &active); + let _ = self.verify_validator_set(&new_session_start, &active); } - let prev_validator_set = if let Some(r) = &self.rounds { - r.validator_set().clone() - } else { - // no previous rounds present use new validator set instead (genesis case) - active.clone() - }; let id = active.id(); - self.rounds = Some(Rounds::new(session_start, active, prev_validator_set)); - info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, session_start); + self.rounds = Some(Rounds::new(new_session_start, active)); + info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, new_session_start); } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { @@ -287,12 +299,36 @@ where self.init_session_at(new_validator_set, *header.number()); } + // Handle any pending votes for now finalized blocks. + self.check_pending_votes(); + // Vote if there's now a new vote target. if let Some(target_number) = self.current_vote_target() { self.do_vote(target_number); } } + // Handles all buffered votes for now finalized blocks. 
+ fn check_pending_votes(&mut self) { + let not_finalized = self.best_grandpa_block_header.number().saturating_add(1u32.into()); + let still_pending = self.pending_votes.split_off(¬_finalized); + let votes_to_handle = std::mem::replace(&mut self.pending_votes, still_pending); + for (num, votes) in votes_to_handle.into_iter() { + if Some(num) > self.best_beefy_block { + debug!(target: "beefy", "🥩 Handling buffered votes for now GRANDPA finalized block: {:?}.", num); + for v in votes.into_iter() { + self.handle_vote( + (v.commitment.payload, v.commitment.block_number), + (v.id, v.signature), + false, + ); + } + } else { + debug!(target: "beefy", "🥩 Dropping outdated buffered votes for now BEEFY finalized block: {:?}.", num); + } + } + } + fn handle_vote( &mut self, round: (Payload, NumberFor), @@ -313,7 +349,7 @@ where self.gossip_validator.conclude_round(round.1); // id is stored for skipped session metric calculation - self.last_signed_id = rounds.validator_set_id_for(round.1); + self.last_signed_id = rounds.validator_set_id(); let block_num = round.1; let commitment = Commitment { @@ -390,7 +426,7 @@ where debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); return } - (rounds.validators_for(target_number), rounds.validator_set_id_for(target_number)) + (rounds.validators(), rounds.validator_set_id()) } else { debug!(target: "beefy", "🥩 Missing validator set - can't vote for: {:?}", target_hash); return @@ -506,11 +542,23 @@ where }, vote = votes.next().fuse() => { if let Some(vote) = vote { - self.handle_vote( - (vote.commitment.payload, vote.commitment.block_number), - (vote.id, vote.signature), - false - ); + let block_num = vote.commitment.block_number; + if block_num > *self.best_grandpa_block_header.number() { + // Only handle votes for blocks we _know_ have been finalized. + // Buffer vote to be handled later. 
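The `check_pending_votes` logic above leans on `BTreeMap::split_off` to separate votes that are now GRANDPA-finalized from those still pending. A minimal standalone sketch of that pattern, with illustrative names only (not part of this patch):

use std::collections::BTreeMap;

// Drain all entries with keys <= `finalized` from the buffer, keeping the rest for later.
// This mirrors the `split_off` + `mem::replace` step in `check_pending_votes`.
fn drain_finalized<V>(buffer: &mut BTreeMap<u64, Vec<V>>, finalized: u64) -> BTreeMap<u64, Vec<V>> {
    // Everything strictly above `finalized` is still pending and must stay buffered.
    let still_pending = buffer.split_off(&finalized.saturating_add(1));
    // Swap: the old map (keys <= `finalized`) is returned for processing,
    // while the still-pending entries become the new buffer.
    std::mem::replace(buffer, still_pending)
}

fn main() {
    let mut buffer: BTreeMap<u64, Vec<&str>> = BTreeMap::new();
    buffer.entry(10).or_default().push("vote-a");
    buffer.entry(12).or_default().push("vote-b");
    buffer.entry(15).or_default().push("vote-c");

    let ready = drain_finalized(&mut buffer, 12);
    assert_eq!(ready.keys().copied().collect::<Vec<_>>(), vec![10, 12]);
    assert_eq!(buffer.keys().copied().collect::<Vec<_>>(), vec![15]);
}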
+ debug!( + target: "beefy", + "🥩 Buffering vote for not (yet) finalized block: {:?}.", + block_num + ); + self.pending_votes.entry(block_num).or_default().push(vote); + } else { + self.handle_vote( + (vote.commitment.payload, vote.commitment.block_number), + (vote.id, vote.signature), + false + ); + } } else { return; } @@ -854,8 +902,7 @@ pub(crate) mod tests { worker.best_grandpa_block_header = grandpa_header; worker.best_beefy_block = best_beefy; worker.min_block_delta = min_delta; - worker.rounds = - Some(Rounds::new(session_start, validator_set.clone(), validator_set.clone())); + worker.rounds = Some(Rounds::new(session_start, validator_set.clone())); }; // under min delta @@ -970,11 +1017,10 @@ pub(crate) mod tests { worker.init_session_at(validator_set.clone(), 1); let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.validator_set(), &validator_set); assert_eq!(worker_rounds.session_start(), &1); // in genesis case both current and prev validator sets are the same - assert_eq!(worker_rounds.validator_set_id_for(1), validator_set.id()); - assert_eq!(worker_rounds.validator_set_id_for(2), validator_set.id()); + assert_eq!(worker_rounds.validators(), validator_set.validators()); + assert_eq!(worker_rounds.validator_set_id(), validator_set.id()); // new validator set let keys = &[Keyring::Bob]; @@ -984,11 +1030,8 @@ pub(crate) mod tests { worker.init_session_at(new_validator_set.clone(), 11); let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.validator_set(), &new_validator_set); assert_eq!(worker_rounds.session_start(), &11); - // mandatory block gets prev set, further blocks get new set - assert_eq!(worker_rounds.validator_set_id_for(11), validator_set.id()); - assert_eq!(worker_rounds.validator_set_id_for(12), new_validator_set.id()); - assert_eq!(worker_rounds.validator_set_id_for(13), new_validator_set.id()); + assert_eq!(worker_rounds.validators(), new_validator_set.validators()); + assert_eq!(worker_rounds.validator_set_id(), new_validator_set.id()); } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5db8f102d037b..387a7b3fdde90 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -356,13 +356,15 @@ where let mut default_sets_reserved = HashSet::new(); for reserved in network_config.default_peers_set.reserved_nodes.iter() { default_sets_reserved.insert(reserved.peer_id); - known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); + + if !reserved.multiaddr.is_empty() { + known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); + } } let mut bootnodes = Vec::with_capacity(network_config.boot_nodes.len()); for bootnode in network_config.boot_nodes.iter() { bootnodes.push(bootnode.peer_id); - known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone())); } // Set number 0 is used for block announces. diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d2600e3295bf0..edd30e9c9dee4 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -228,12 +228,10 @@ where )?; // List of multiaddresses that we know in the network. - let mut bootnodes = Vec::new(); let mut boot_node_ids = HashSet::new(); // Process the bootnodes. 
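The service.rs hunk a few lines below replaces the duplicate-bootnode check over `known_addresses` with a pairwise check over the bootnode list itself. A compilable sketch of that check with stand-in types; `Bootnode` and its string fields are hypothetical, the real config uses `MultiaddrWithPeerId`:

#[derive(Debug)]
struct Bootnode {
    peer_id: String,
    multiaddr: String,
}

// Find two bootnode entries that share a multiaddress but claim different peer ids.
fn find_conflicting_bootnodes(bootnodes: &[Bootnode]) -> Option<(&Bootnode, &Bootnode)> {
    bootnodes.iter().find_map(|a| {
        bootnodes
            .iter()
            .filter(|b| b.multiaddr == a.multiaddr)
            .find(|b| b.peer_id != a.peer_id)
            .map(|b| (a, b))
    })
}

fn main() {
    let nodes = vec![
        Bootnode { peer_id: "12D3KooWAlice".into(), multiaddr: "/ip4/10.0.0.1/tcp/30333".into() },
        Bootnode { peer_id: "12D3KooWBob".into(), multiaddr: "/ip4/10.0.0.1/tcp/30333".into() },
    ];
    // Same address, different peer ids: the error case the node refuses to start with.
    assert!(find_conflicting_bootnodes(&nodes).is_some());
}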
for bootnode in params.network_config.boot_nodes.iter() { - bootnodes.push(bootnode.peer_id); boot_node_ids.insert(bootnode.peer_id); known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone())); } @@ -241,12 +239,18 @@ where let boot_node_ids = Arc::new(boot_node_ids); // Check for duplicate bootnodes. - known_addresses.iter().try_for_each(|(peer_id, addr)| { - if let Some(other) = known_addresses.iter().find(|o| o.1 == *addr && o.0 != *peer_id) { + params.network_config.boot_nodes.iter().try_for_each(|bootnode| { + if let Some(other) = params + .network_config + .boot_nodes + .iter() + .filter(|o| o.multiaddr == bootnode.multiaddr) + .find(|o| o.peer_id != bootnode.peer_id) + { Err(Error::DuplicateBootnode { - address: addr.clone(), - first_id: *peer_id, - second_id: other.0, + address: bootnode.multiaddr.clone(), + first_id: bootnode.peer_id, + second_id: other.peer_id, }) } else { Ok(()) diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 94c895ea91517..359ee88a9c485 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -22,7 +22,7 @@ serde = { version = "1.0.136", features = ["derive"] } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-core = { version = "6.0.0", path = "../../../primitives/core" } -sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/merkle-mountain-range" } +sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle-mountain-range" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 99359bfea8eb6..be1a74450d1f4 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -29,7 +29,7 @@ use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_mmr_primitives::{Error as MmrError, LeafIndex, Proof}; +use sp_mmr_primitives::{BatchProof, Error as MmrError, LeafIndex, Proof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -57,6 +57,34 @@ impl LeafProof { } } +/// Retrieved MMR leaves and their proof. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct LeafBatchProof { + /// Block hash the proof was generated for. + pub block_hash: BlockHash, + /// SCALE-encoded vector of `LeafData`. + pub leaves: Bytes, + /// SCALE-encoded proof data. See [sp_mmr_primitives::BatchProof]. + pub proof: Bytes, +} + +impl LeafBatchProof { + /// Create new `LeafBatchProof` from a given vector of `Leaf` and a + /// [sp_mmr_primitives::BatchProof]. + pub fn new( + block_hash: BlockHash, + leaves: Vec, + proof: BatchProof, + ) -> Self + where + Leaf: Encode, + MmrHash: Encode, + { + Self { block_hash, leaves: Bytes(leaves.encode()), proof: Bytes(proof.encode()) } + } +} + /// MMR RPC methods. #[rpc] pub trait MmrApi { @@ -74,6 +102,23 @@ pub trait MmrApi { leaf_index: LeafIndex, at: Option, ) -> Result>; + + /// Generate MMR proof for the given leaf indices. + /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// MMR proof for a set of leaves at the given `leaf_indices`. 
+ /// Optionally, a block hash at which the runtime should be queried can be specified. + /// + /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// the leaves). Both parameters are SCALE-encoded. + /// The order of entries in the `leaves` field of the returned struct + /// is the same as the order of the entries in `leaf_indices` supplied + #[rpc(name = "mmr_generateBatchProof")] + fn generate_batch_proof( + &self, + leaf_indices: Vec, + at: Option, + ) -> Result>; } /// An implementation of MMR specific RPC methods. @@ -117,6 +162,28 @@ where Ok(LeafProof::new(block_hash, leaf, proof)) } + + fn generate_batch_proof( + &self, + leaf_indices: Vec, + at: Option<::Hash>, + ) -> Result::Hash>> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash); + + let (leaves, proof) = api + .generate_batch_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_indices, + ) + .map_err(runtime_error_into_rpc_error)? + .map_err(mmr_error_into_rpc_error)?; + + Ok(LeafBatchProof::new(block_hash, leaves, proof)) + } } const RUNTIME_ERROR: i64 = 8000; @@ -179,6 +246,28 @@ mod tests { ); } + #[test] + fn should_serialize_leaf_batch_proof() { + // given + let leaf = vec![1_u8, 2, 3, 4]; + let proof = BatchProof { + leaf_indices: vec![1], + leaf_count: 9, + items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], + }; + + let leaf_proof = LeafBatchProof::new(H256::repeat_byte(0), vec![leaf], proof); + + // when + let actual = serde_json::to_string(&leaf_proof).unwrap(); + + // then + assert_eq!( + actual, + r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x041001020304","proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# + ); + } + #[test] fn should_deserialize_leaf_proof() { // given @@ -205,4 +294,31 @@ mod tests { // then assert_eq!(actual, expected); } + + #[test] + fn should_deserialize_leaf_batch_proof() { + // given + let expected = LeafBatchProof { + block_hash: H256::repeat_byte(0), + leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), + proof: Bytes( + BatchProof { + leaf_indices: vec![1], + leaf_count: 9, + items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], + } + .encode(), + ), + }; + + // when + let actual: LeafBatchProof = serde_json::from_str(r#"{ + "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "leaves":"0x041001020304", + "proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" + }"#).unwrap(); + + // then + assert_eq!(actual, expected); + } } diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 855eb0a7436dc..d6cf3240692fc 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -71,6 +71,7 @@ mod tests; pub use pallet::*; pub use sp_mmr_primitives::{self as primitives, Error, LeafDataProvider, LeafIndex, NodeIndex}; +use sp_std::prelude::*; /// The most common use case for MMRs is to store historical block hashes, /// so that any point in time in the future we can receive a proof about some past @@ -228,22 +229,23 @@ type LeafOf = <>::LeafData as 
primitives::LeafDataProvider> /// Hashing used for the pallet. pub(crate) type HashingOf = >::Hashing; -/// Stateless MMR proof verification. +/// Stateless MMR proof verification for batch of leaves. /// -/// This function can be used to verify received MMR proof (`proof`) -/// for given leaf data (`leaf`) against a known MMR root hash (`root`). -/// -/// The verification does not require any storage access. -pub fn verify_leaf_proof( +/// This function can be used to verify received MMR [primitives::BatchProof] (`proof`) +/// for given leaves set (`leaves`) against a known MMR root hash (`root`). +/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the +/// same position in both the `leaves` vector and the `leaf_indices` vector contained in the +/// [primitives::BatchProof]. +pub fn verify_leaves_proof( root: H::Output, - leaf: mmr::Node, - proof: primitives::Proof, + leaves: Vec>, + proof: primitives::BatchProof, ) -> Result<(), primitives::Error> where H: traits::Hash, L: primitives::FullLeaf, { - let is_valid = mmr::verify_leaf_proof::(root, leaf, proof)?; + let is_valid = mmr::verify_leaves_proof::(root, leaves, proof)?; if is_valid { Ok(()) } else { @@ -255,29 +257,36 @@ impl, I: 'static> Pallet { fn offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { (T::INDEXING_PREFIX, pos).encode() } - - /// Generate a MMR proof for the given `leaf_index`. + /// Generate a MMR proof for the given `leaf_indices`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. - pub fn generate_proof( - leaf_index: LeafIndex, - ) -> Result<(LeafOf, primitives::Proof<>::Hash>), primitives::Error> { + pub fn generate_batch_proof( + leaf_indices: Vec, + ) -> Result< + (Vec>, primitives::BatchProof<>::Hash>), + primitives::Error, + > { let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); - mmr.generate_proof(leaf_index) + mmr.generate_batch_proof(leaf_indices) + } + + /// Return the on-chain MMR root hash. + pub fn mmr_root() -> >::Hash { + Self::mmr_root_hash() } - /// Verify MMR proof for given `leaf`. + /// Verify MMR proof for given `leaves`. /// /// This method is safe to use within the runtime code. /// It will return `Ok(())` if the proof is valid /// and an `Err(..)` if MMR is inconsistent (some leaves are missing) /// or the proof is invalid. - pub fn verify_leaf( - leaf: LeafOf, - proof: primitives::Proof<>::Hash>, + pub fn verify_leaves( + leaves: Vec>, + proof: primitives::BatchProof<>::Hash>, ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || @@ -288,16 +297,11 @@ impl, I: 'static> Pallet { } let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); - let is_valid = mmr.verify_leaf_proof(leaf, proof)?; + let is_valid = mmr.verify_leaves_proof(leaves, proof)?; if is_valid { Ok(()) } else { Err(primitives::Error::Verify.log_debug("The proof is incorrect.")) } } - - /// Return the on-chain MMR root hash. 
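The ordering note on `verify_leaves_proof` above boils down to keeping `leaves` and the proof's `leaf_indices` aligned entry-for-entry. A small standalone helper showing one way a caller might enforce that; `Leaf` and `sort_aligned` are hypothetical and not part of the pallet API:

type Leaf = Vec<u8>;

// Sort (index, leaf) pairs together so both output vectors stay aligned position-by-position.
fn sort_aligned(mut pairs: Vec<(u64, Leaf)>) -> (Vec<u64>, Vec<Leaf>) {
    pairs.sort_by_key(|(index, _)| *index);
    pairs.into_iter().unzip()
}

fn main() {
    let (indices, leaves) = sort_aligned(vec![(5, vec![5u8]), (0, vec![0u8]), (4, vec![4u8])]);
    assert_eq!(indices, vec![0, 4, 5]);
    assert_eq!(leaves, vec![vec![0u8], vec![4u8], vec![5u8]]);
}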
- pub fn mmr_root() -> >::Hash { - Self::mmr_root_hash() - } } diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index a1516ee8607f4..44e684c1bdcac 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -24,27 +24,39 @@ use crate::{ primitives::{self, Error, NodeIndex}, Config, HashingOf, }; -#[cfg(not(feature = "std"))] -use sp_std::vec; +use sp_std::prelude::*; -/// Stateless verification of the leaf proof. -pub fn verify_leaf_proof( +/// Stateless verification of the proof for a batch of leaves. +/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the +/// same position in both the `leaves` vector and the `leaf_indices` vector contained in the +/// [primitives::BatchProof] +pub fn verify_leaves_proof( root: H::Output, - leaf: Node, - proof: primitives::Proof, + leaves: Vec>, + proof: primitives::BatchProof, ) -> Result where H: sp_runtime::traits::Hash, L: primitives::FullLeaf, { let size = NodesUtils::new(proof.leaf_count).size(); - let leaf_position = mmr_lib::leaf_index_to_pos(proof.leaf_index); + + if leaves.len() != proof.leaf_indices.len() { + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) + } + + let leaves_and_position_data = proof + .leaf_indices + .into_iter() + .map(|index| mmr_lib::leaf_index_to_pos(index)) + .zip(leaves.into_iter()) + .collect(); let p = mmr_lib::MerkleProof::, Hasher>::new( size, proof.items.into_iter().map(Node::Hash).collect(), ); - p.verify(Node::Hash(root), vec![(leaf_position, leaf)]) + p.verify(Node::Hash(root), leaves_and_position_data) .map_err(|e| Error::Verify.log_debug(e)) } @@ -76,19 +88,32 @@ where Self { mmr: mmr_lib::MMR::new(size, Default::default()), leaves } } - /// Verify proof of a single leaf. - pub fn verify_leaf_proof( + /// Verify proof for a set of leaves. + /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have + /// the same position in both the `leaves` vector and the `leaf_indices` vector contained in the + /// [primitives::BatchProof] + pub fn verify_leaves_proof( &self, - leaf: L, - proof: primitives::Proof<>::Hash>, + leaves: Vec, + proof: primitives::BatchProof<>::Hash>, ) -> Result { let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), proof.items.into_iter().map(Node::Hash).collect(), ); - let position = mmr_lib::leaf_index_to_pos(proof.leaf_index); + + if leaves.len() != proof.leaf_indices.len() { + return Err(Error::Verify.log_debug("Proof leaf_indices not same length with leaves")) + } + + let leaves_positions_and_data = proof + .leaf_indices + .into_iter() + .map(|index| mmr_lib::leaf_index_to_pos(index)) + .zip(leaves.into_iter().map(|leaf| Node::Data(leaf))) + .collect(); let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; - p.verify(root, vec![(position, Node::Data(leaf))]) + p.verify(root, leaves_positions_and_data) .map_err(|e| Error::Verify.log_debug(e)) } @@ -134,29 +159,36 @@ where I: 'static, L: primitives::FullLeaf + codec::Decode, { - /// Generate a proof for given leaf index. + /// Generate a proof for given leaf indices. /// /// Proof generation requires all the nodes (or their hashes) to be available in the storage. /// (i.e. you can't run the function in the pruned storage). 
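`verify_leaves_proof` above maps each leaf index to its MMR node position and zips it with the matching leaf before calling `mmr_lib::MerkleProof::verify`. A standalone sketch of that step; the closed form `2*i - popcount(i)` is a commonly used formula for this MMR layout and is only an assumed stand-in for `mmr_lib::leaf_index_to_pos`:

// Position of the i-th leaf among all MMR nodes (leaves and inner nodes interleaved).
fn leaf_index_to_pos(index: u64) -> u64 {
    2 * index - u64::from(index.count_ones())
}

// Pair each leaf with the node position derived from its leaf index, preserving order.
fn positions_with_leaves<L>(leaf_indices: Vec<u64>, leaves: Vec<L>) -> Vec<(u64, L)> {
    leaf_indices.into_iter().map(leaf_index_to_pos).zip(leaves).collect()
}

fn main() {
    // In a 7-leaf MMR, leaves 0, 4 and 5 sit at node positions 0, 7 and 8.
    let pairs = positions_with_leaves(vec![0, 4, 5], vec!["a", "b", "c"]);
    assert_eq!(pairs, vec![(0, "a"), (7, "b"), (8, "c")]);
}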
- pub fn generate_proof( + pub fn generate_batch_proof( &self, - leaf_index: NodeIndex, - ) -> Result<(L, primitives::Proof<>::Hash>), Error> { - let position = mmr_lib::leaf_index_to_pos(leaf_index); + leaf_indices: Vec, + ) -> Result<(Vec, primitives::BatchProof<>::Hash>), Error> { + let positions = leaf_indices + .iter() + .map(|index| mmr_lib::leaf_index_to_pos(*index)) + .collect::>(); let store = >::default(); - let leaf = match mmr_lib::MMRStore::get_elem(&store, position) { - Ok(Some(Node::Data(leaf))) => leaf, - e => return Err(Error::LeafNotFound.log_debug(e)), - }; + let leaves = positions + .iter() + .map(|pos| match mmr_lib::MMRStore::get_elem(&store, *pos) { + Ok(Some(Node::Data(leaf))) => Ok(leaf), + e => Err(Error::LeafNotFound.log_debug(e)), + }) + .collect::, Error>>()?; + let leaf_count = self.leaves; self.mmr - .gen_proof(vec![position]) + .gen_proof(positions) .map_err(|e| Error::GenerateProof.log_error(e)) - .map(|p| primitives::Proof { - leaf_index, + .map(|p| primitives::BatchProof { + leaf_indices, leaf_count, items: p.proof_items().iter().map(|x| x.hash()).collect(), }) - .map(|p| (leaf, p)) + .map(|p| (leaves, p)) } } diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs index 1cb4e8535b991..04fdfa199e72b 100644 --- a/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -22,7 +22,7 @@ pub mod utils; use sp_mmr_primitives::{DataOrHash, FullLeaf}; use sp_runtime::traits; -pub use self::mmr::{verify_leaf_proof, Mmr}; +pub use self::mmr::{verify_leaves_proof, Mmr}; /// Node type for runtime `T`. pub type NodeOf = Node<>::Hashing, L>; diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 70d1395aa94d5..d025910a9ee5c 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -23,7 +23,7 @@ use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use sp_mmr_primitives::{Compact, Proof}; +use sp_mmr_primitives::{BatchProof, Compact}; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() @@ -225,16 +225,18 @@ fn should_generate_proofs_correctly() { // when generate proofs for all leaves let proofs = (0_u64..crate::NumberOfLeaves::::get()) .into_iter() - .map(|leaf_index| crate::Pallet::::generate_proof(leaf_index).unwrap()) + .map(|leaf_index| { + crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() + }) .collect::>(); // then assert_eq!( proofs[0], ( - Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),)), - Proof { - leaf_index: 0, + vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], + BatchProof { + leaf_indices: vec![0], leaf_count: 7, items: vec![ hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), @@ -247,9 +249,9 @@ fn should_generate_proofs_correctly() { assert_eq!( proofs[4], ( - Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),)), - Proof { - leaf_index: 4, + vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], + BatchProof { + leaf_indices: vec![4], leaf_count: 7, items: vec![ hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), @@ -262,9 +264,9 @@ fn should_generate_proofs_correctly() { assert_eq!( proofs[6], ( - Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),)), - Proof { - leaf_index: 
6, + vec![Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),))], + BatchProof { + leaf_indices: vec![6], leaf_count: 7, items: vec![ hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), @@ -276,6 +278,37 @@ fn should_generate_proofs_correctly() { }); } +#[test] +fn should_generate_batch_proof_correctly() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + // given + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proofs now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when generate proofs for all leaves + let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); + + // then + assert_eq!( + proof, + BatchProof { + leaf_indices: vec![0, 4, 5], + leaf_count: 7, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c"), + ], + } + ); + }); +} + #[test] fn should_verify() { let _ = env_logger::try_init(); @@ -289,15 +322,40 @@ fn should_verify() { // Try to generate proof now. This requires the offchain extensions to be present // to retrieve full leaf data. register_offchain_ext(&mut ext); - let (leaf, proof5) = ext.execute_with(|| { + let (leaves, proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_batch_proof(vec![5]).unwrap() + }); + + ext.execute_with(|| { + init_chain(7); + // then + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); + }); +} + +#[test] +fn should_verify_batch_proof() { + let _ = env_logger::try_init(); + + // Start off with chain initialisation and storing indexing data off-chain + // (MMR Leafs) + let mut ext = new_test_ext(); + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proof now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_proof(5).unwrap() + crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); ext.execute_with(|| { init_chain(7); // then - assert_eq!(crate::Pallet::::verify_leaf(leaf, proof5), Ok(())); + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); }); } @@ -314,16 +372,49 @@ fn verification_should_be_stateless() { // Try to generate proof now. This requires the offchain extensions to be present // to retrieve full leaf data. register_offchain_ext(&mut ext); - let (leaf, proof5) = ext.execute_with(|| { + let (leaves, proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_batch_proof(vec![5]).unwrap() + }); + let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + + // Verify proof without relying on any on-chain data. + let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>(root, vec![leaf], proof5), + Ok(()) + ); +} + +#[test] +fn should_verify_batch_proof_statelessly() { + let _ = env_logger::try_init(); + + // Start off with chain initialisation and storing indexing data off-chain + // (MMR Leafs) + let mut ext = new_test_ext(); + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proof now. 
This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_proof(5).unwrap() + crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); // Verify proof without relying on any on-chain data. - let leaf = crate::primitives::DataOrHash::Data(leaf); assert_eq!( - crate::verify_leaf_proof::<::Hashing, _>(root, leaf, proof5), + crate::verify_leaves_proof::<::Hashing, _>( + root, + leaves + .into_iter() + .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) + .collect(), + proof + ), Ok(()) ); } @@ -340,10 +431,10 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { ext.execute_with(|| { // when - let (leaf, proof5) = crate::Pallet::::generate_proof(5).unwrap(); + let (leaves, proof5) = crate::Pallet::::generate_batch_proof(vec![5]).unwrap(); new_block(); // then - assert_eq!(crate::Pallet::::verify_leaf(leaf, proof5), Ok(())); + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); }); } diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 60ef02c53001c..5a339d069062c 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -22,9 +22,9 @@ use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; -use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; +use sp_std::{fmt, vec}; /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; @@ -351,6 +351,38 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); +/// A MMR proof data for a group of leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct BatchProof { + /// The indices of the leaves the proof is for. + pub leaf_indices: Vec, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: NodeIndex, + /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). + pub items: Vec, +} + +impl BatchProof { + /// Converts batch proof to single leaf proof + pub fn into_single_leaf_proof(proof: BatchProof) -> Result, Error> { + Ok(Proof { + leaf_index: *proof.leaf_indices.get(0).ok_or(Error::InvalidLeafIndex)?, + leaf_count: proof.leaf_count, + items: proof.items, + }) + } +} + +impl Proof { + /// Converts a single leaf proof into a batch proof + pub fn into_batch_proof(proof: Proof) -> BatchProof { + BatchProof { + leaf_indices: vec![proof.leaf_index], + leaf_count: proof.leaf_count, + items: proof.items, + } + } +} /// Merkle Mountain Range operation error. #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] pub enum Error { @@ -366,6 +398,10 @@ pub enum Error { Verify, /// Leaf not found in the storage. LeafNotFound, + /// Mmr Pallet not included in runtime + PalletNotIncluded, + /// Cannot find the requested leaf index + InvalidLeafIndex, } impl Error { @@ -417,6 +453,27 @@ sp_api::decl_runtime_apis! { /// Return the on-chain MMR root hash. fn mmr_root() -> Result; + + /// Generate MMR proof for a series of leaves under given indices. + fn generate_batch_proof(leaf_indices: Vec) -> Result<(Vec, BatchProof), Error>; + + /// Verify MMR proof against on-chain MMR for a batch of leaves. 
+ /// + /// Note this function will use the on-chain MMR root hash and check if the proof + /// matches the hash. + /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof]. + fn verify_batch_proof(leaves: Vec, proof: BatchProof) -> Result<(), Error>; + + /// Verify MMR proof against given root hash for a batch of leaves. + /// + /// Note this function does not require any on-chain storage - the + /// proof is verified against the given MMR root hash. + /// + /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof]. + fn verify_batch_proof_stateless(root: Hash, leaves: Vec, proof: BatchProof) + -> Result<(), Error>; } }
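To read the new `Proof`/`BatchProof` conversions in isolation, here is a compilable sketch with local stand-in structs (plain `u64` indices, `Option` instead of `Result<_, Error>`, and methods rather than the patch's associated functions); the real types and the `InvalidLeafIndex` error live in `sp_mmr_primitives` as added above:

#[derive(Debug, Clone, PartialEq)]
struct Proof<Hash> {
    leaf_index: u64,
    leaf_count: u64,
    items: Vec<Hash>,
}

#[derive(Debug, Clone, PartialEq)]
struct BatchProof<Hash> {
    leaf_indices: Vec<u64>,
    leaf_count: u64,
    items: Vec<Hash>,
}

impl<Hash> BatchProof<Hash> {
    // A batch proof is flattened by taking its first leaf index; an empty index list
    // is the error case (`InvalidLeafIndex` in the real code, `None` here).
    fn into_single_leaf_proof(self) -> Option<Proof<Hash>> {
        let leaf_index = *self.leaf_indices.first()?;
        Some(Proof { leaf_index, leaf_count: self.leaf_count, items: self.items })
    }
}

impl<Hash> Proof<Hash> {
    // Every single-leaf proof is trivially a batch proof covering one leaf.
    fn into_batch_proof(self) -> BatchProof<Hash> {
        BatchProof { leaf_indices: vec![self.leaf_index], leaf_count: self.leaf_count, items: self.items }
    }
}

fn main() {
    let single = Proof { leaf_index: 3u64, leaf_count: 7, items: vec![[1u8; 32], [2u8; 32]] };
    let round_trip = single.clone().into_batch_proof().into_single_leaf_proof();
    assert_eq!(round_trip, Some(single));
}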