diff --git a/Cargo.lock b/Cargo.lock index ec5af8aca4ecf..dd698045bcdc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8176,6 +8176,7 @@ name = "sp-storage" version = "2.0.0-rc5" dependencies = [ "impl-serde 0.2.3", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", @@ -8236,12 +8237,14 @@ version = "2.0.0-rc5" dependencies = [ "criterion 0.2.11", "hash-db", + "hashbrown 0.8.0", "hex-literal", "memory-db", "parity-scale-codec", "sp-core", "sp-runtime", "sp-std", + "sp-storage", "trie-bench", "trie-db", "trie-root", diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 886dc6011492f..8eff2967a8b63 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,8 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; -use sp_state_machine::Backend as _; +use sp_state_machine::backend::Backend as _; +use sp_state_machine::SimpleProof; use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; use node_primitives::Hash; @@ -181,7 +182,7 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new( + let trie_backend = sp_state_machine::TrieBackend::<_, _, SimpleProof>::new( storage, self.root, ); diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index efc5ca4ee8ca0..4860542265fb5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -40,12 +40,22 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use parking_lot::RwLock; -pub use sp_state_machine::Backend as StateBackend; +pub use sp_state_machine::backend::Backend as StateBackend; +pub use sp_state_machine::backend::ProofRawFor; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; +/// Extracts the proof for the given backend. 
+pub type ProofFor = < + RecProofForB as StateBackend> +>::StorageProof; + +type RecProofForSB = >>::RecProofBackend; + +type RecProofForB = RecProofForSB, Block>; + /// Extracts the transaction for the given state backend. pub type TransactionForSB = >>::Transaction; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index d9d43900dfc94..8e2fd97f4f91c 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -24,7 +24,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashFor}, }; use sp_state_machine::{ - OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, + OverlayedChanges, ExecutionManager, ExecutionStrategy, }; use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; @@ -32,6 +32,7 @@ use sp_core::{NativeOrEncoded,offchain::storage::OffchainOverlayedChanges}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; +use sp_state_machine::backend::ProofRawFor; /// Executor Provider pub trait ExecutorProvider { @@ -93,7 +94,9 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: &Option>, + proof_recorder: Option<&RefCell< + ProofRecorder<>::State, B> + >>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; @@ -105,31 +108,31 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given state, gathering execution proof. /// /// No changes are made. 
- fn prove_at_state>>( + fn prove_at_state>>( &self, - mut state: S, + state: S, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - let trie_state = state.as_trie_backend() + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error> { + let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box )?; - self.prove_at_trie_state(trie_state, overlay, method, call_data) + self.prove_at_proof_backend_state(&proof_state, overlay, method, call_data) } /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; + call_data: &[u8], + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error>; /// Get runtime version if supported. fn native_runtime_version(&self) -> Option<&NativeVersion>; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 30cfd3a1b671b..4f2834f9bad15 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,12 +32,13 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + backend::Backend as StateBackend, SimpleProof, InMemoryBackend, + prove_read_on_proof_backend, read_proof_check, read_proof_check_on_proving_backend, }; - use sp_blockchain::{Error as ClientError, Result as ClientResult}; +type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; + /// The size of each CHT. 
This value is passed to every CHT-related function from /// production code. Other values are passed from tests. const SIZE: u32 = 2048; @@ -104,7 +105,7 @@ pub fn build_proof( cht_num: Header::Number, blocks: BlocksI, hashes: HashesI -) -> ClientResult +) -> ClientResult where Header: HeaderT, Hasher: hash_db::Hasher, @@ -116,11 +117,11 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage.as_trie_backend() - .expect("InMemoryState::as_trie_backend always returns Some; qed"); - prove_read_on_trie_backend( - trie_storage, + let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let proof_backend = storage.as_proof_backend() + .expect("InMemoryState::as_proof_backend always returns Some; qed"); + prove_read_on_proof_backend( + &proof_backend, blocks.into_iter().map(|number| encode_cht_key(number)), ).map_err(ClientError::Execution) } @@ -130,7 +131,7 @@ pub fn check_proof( local_root: Header::Hash, local_number: Header::Number, remote_hash: Header::Hash, - remote_proof: StorageProof, + remote_proof: SimpleProof, ) -> ClientResult<()> where Header: HeaderT, @@ -142,7 +143,7 @@ pub fn check_proof( local_number, remote_hash, move |local_root, local_cht_key| - read_proof_check::( + read_proof_check::, Hasher, _>( local_root, remote_proof, ::std::iter::once(local_cht_key), @@ -159,7 +160,7 @@ pub fn check_proof_on_proving_backend( local_root: Header::Hash, local_number: Header::Number, remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, + proving_backend: &ProofCheckBackend, ) -> ClientResult<()> where Header: HeaderT, @@ -171,7 +172,7 @@ pub fn check_proof_on_proving_backend( local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_proving_backend::( + read_proof_check_on_proving_backend::, Hasher>( proving_backend, local_cht_key, ).map_err(|e| ClientError::from(e)), diff --git 
a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 306c3c2b2f10c..bd46d2f6389f1 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -29,7 +29,7 @@ use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, + ChangesTrieTransaction, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, }; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; @@ -44,6 +44,8 @@ use crate::{ leaves::LeafSet, }; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + struct PendingBlock { block: StoredBlock, state: NewBlockState, diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 677066936330e..02128ca7fe21e 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,7 +37,8 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, ExecutionStrategy}; +pub use sp_state_machine::{ProofCommon, SimpleProof, ExecutionStrategy, + ProofNodes, BackendProof}; /// Usage Information Provider interface /// diff --git a/client/api/src/light.rs b/client/api/src/light.rs index b359c1149eea6..906d30d242abb 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -27,7 +27,7 @@ use sp_runtime::{ generic::BlockId }; use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; -use sp_state_machine::StorageProof; +use sp_state_machine::SimpleProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, Error as ClientError, Result as ClientResult, @@ -124,7 +124,7 @@ pub struct ChangesProof { pub roots: BTreeMap, /// The proofs for all changes tries roots that have been touched AND are /// missing from the requester' node. It is a map of CHT number => proof. 
- pub roots_proof: StorageProof, + pub roots_proof: SimpleProof, } /// Remote block body request @@ -190,31 +190,31 @@ pub trait Fetcher: Send + Sync { /// /// Implementations of this trait should not use any prunable blockchain data /// except that is passed to its methods. -pub trait FetchChecker: Send + Sync { +pub trait FetchChecker: Send + Sync { /// Check remote header proof. fn check_header_proof( &self, request: &RemoteHeaderRequest, header: Option, - remote_proof: StorageProof, + remote_proof: SimpleProof, ) -> ClientResult; /// Check remote storage read proof. fn check_read_proof( &self, request: &RemoteReadRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult, Option>>>; /// Check remote storage read proof. fn check_read_child_proof( &self, request: &RemoteReadChildRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult, Option>>>; /// Check remote method execution proof. fn check_execution_proof( &self, request: &RemoteCallRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult>; /// Check remote changes query proof. fn check_changes_proof( diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 5749ae0576fc3..2720eb208d9b1 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,19 +19,20 @@ //! Proof utilities use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT}, + traits::{Block as BlockT, HashFor}, }; -use crate::{StorageProof, ChangesProof}; +use crate::{SimpleProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use sp_trie::BackendProof; /// Interface for providing block proving utilities. -pub trait ProofProvider { +pub trait ProofProvider>> { /// Reads storage value at a given block + key, returning read proof. 
fn read_proof( &self, id: &BlockId, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning /// read proof. @@ -40,7 +41,7 @@ pub trait ProofProvider { id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash /// AND returning execution proof. @@ -51,9 +52,10 @@ pub trait ProofProvider { id: &BlockId, method: &str, call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, StorageProof)>; + ) -> sp_blockchain::Result<(Vec, Proof::ProofRaw)>; + /// Reads given header and generates CHT-based header proof. - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, SimpleProof)>; /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. 
/// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 41d12970464f4..e9f449583b2d7 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -22,7 +22,7 @@ use std::{time, sync::Arc}; use sc_client_api::backend; -use codec::Decode; +use codec::{Encode, Decode}; use sp_consensus::{evaluation, Proposal, RecordProof}; use sp_inherents::InherentData; use log::{error, info, debug, trace, warn}; @@ -312,7 +312,8 @@ impl Proposer error!("Failed to evaluate authored block: {:?}", err); } - Ok(Proposal { block, proof, storage_changes }) + let proof: Option> = proof.map(Into::into); + Ok(Proposal { block, encoded_proof: proof.map(|p| p.encode()), storage_changes }) } } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index b405fc6de0f01..44dd905844efe 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -25,10 +25,7 @@ //! # use sp_consensus::{Environment, Proposer, RecordProof}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; -//! # use substrate_test_runtime_client::{ -//! # runtime::{Extrinsic, Transfer}, AccountKeyring, -//! # DefaultTestClientBuilderExt, TestClientBuilderExt, -//! # }; +//! # use substrate_test_runtime_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); //! 
# let spawner = sp_core::testing::TaskExecutor::new(); diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 904667b1afc6e..caa332668673b 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -35,7 +35,7 @@ use sp_runtime::{ use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; use sp_api::{ - Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, + Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, TransactionOutcome, }; use sp_consensus::RecordProof; @@ -56,12 +56,20 @@ pub struct BuiltBlock, /// An optional proof that was recorded while building the block. - pub proof: Option, + pub proof: Option>>, } -impl>> BuiltBlock { +impl BuiltBlock + where + Block: BlockT, + StateBackend: backend::StateBackend>, +{ /// Convert into the inner values. - pub fn into_inner(self) -> (Block, StorageChanges, Option) { + pub fn into_inner(self) -> ( + Block, + StorageChanges, + Option>>, + ) { (self.block, self.storage_changes, self.proof) } } @@ -246,7 +254,8 @@ mod tests { use super::*; use sp_blockchain::HeaderBackend; use sp_core::Blake2Hasher; - use sp_state_machine::Backend; + use sp_state_machine::backend::Backend; + use sp_state_machine::SimpleProof; use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; #[test] @@ -266,7 +275,7 @@ mod tests { let proof = block.proof.expect("Proof is build on request"); - let backend = sp_state_machine::create_proof_check_backend::( + let backend = sp_state_machine::create_proof_check_backend::( block.storage_changes.transaction_storage_root, proof, ).unwrap(); diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 4e6cb49f1129d..96a99d8a71254 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -911,7 +911,7 @@ mod tests { future::ready(r.map(|b| Proposal { block: b.block, - proof: b.proof, + 
encoded_proof: b.proof.as_ref().map(Encode::encode), storage_changes: b.storage_changes, })) } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 958d7845edbc6..07adef9212010 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -160,7 +160,7 @@ impl DummyProposer { // mutate the block header according to the mutator. (self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, encoded_proof: None, storage_changes: Default::default() })) } } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c3bed3e24f617..5ab7b3a8872b3 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -27,12 +27,13 @@ use sp_trie::{MemoryDB, prefixed_key}; use sp_core::{storage::ChildInfo, hexdisplay::HexDisplay}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend, StorageCollection}; +use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RecordBackendFor}, + SimpleProof, ProofInput, StorageCollection}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor + Arc>>, HashFor, SimpleProof, >; type State = CachingState, B>; @@ -240,7 +241,9 @@ fn state_err() -> String { impl StateBackend> for BenchmarkingState { type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + type StorageProof = as StateBackend>>::StorageProof; + type RecProofBackend = as StateBackend>>::RecProofBackend; + type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.add_read_key(key); @@ -334,7 +337,8 @@ impl 
StateBackend> for BenchmarkingState { child_info: &ChildInfo, delta: impl Iterator)>, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + self.state.borrow().as_ref() + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -353,12 +357,6 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - None - } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction, @@ -437,6 +435,14 @@ impl StateBackend> for BenchmarkingState { fn usage_info(&self) -> sp_state_machine::UsageInfo { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } + + fn from_previous_rec_state( + self, + previous: RecordBackendFor>, + previous_input: ProofInput, + ) -> Option { + self.state.borrow_mut().take().and_then(|s| s.from_previous_rec_state(previous, previous_input)) + } } impl std::fmt::Debug for BenchmarkingState { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d854c80bf3535..01123f671b2e4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -76,8 +76,8 @@ use sp_runtime::traits::{ }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, - backend::Backend as StateBackend, StateMachineStats, + StorageCollection, ChildStorageCollection, SimpleProof, + backend::{Backend as StateBackend, RecordBackendFor}, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -101,7 +101,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// 
DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor + Arc>>, HashFor, SimpleProof, >; const DB_HASH_LEN: usize = 32; @@ -113,7 +113,7 @@ pub type DbHash = [u8; DB_HASH_LEN]; /// It makes sure that the hash we are using stays pinned in storage /// until this structure is dropped. pub struct RefTrackingState { - state: DbState, + state: Option>, storage: Arc>, parent_hash: Option, } @@ -121,7 +121,7 @@ pub struct RefTrackingState { impl RefTrackingState { fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { RefTrackingState { - state, + state: Some(state), parent_hash, storage, } @@ -142,17 +142,27 @@ impl std::fmt::Debug for RefTrackingState { } } +impl RefTrackingState { + #[inline] + fn state(&self) -> &DbState { + self.state.as_ref().expect("Non dropped state") + } +} + + impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + type StorageProof = as StateBackend>>::StorageProof; + type RecProofBackend = as StateBackend>>::RecProofBackend; + type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.storage(key) + self.state().storage(key) } fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) + self.state().storage_hash(key) } fn child_storage( @@ -160,11 +170,11 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.child_storage(child_info, key) + self.state().child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) + self.state().exists_storage(key) } fn exists_child_storage( @@ -172,11 +182,11 @@ impl StateBackend> for 
RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(child_info, key) + self.state().exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) + self.state().next_storage_key(key) } fn next_child_storage_key( @@ -184,15 +194,15 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) + self.state().next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) + self.state().for_keys_with_prefix(prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) + self.state().for_key_values_with_prefix(prefix, f) } fn for_keys_in_child_storage( @@ -200,7 +210,7 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state().for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( @@ -209,14 +219,14 @@ impl StateBackend> for RefTrackingState { prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) + self.state().for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root<'a>( &self, delta: impl Iterator)>, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta) + self.state().storage_root(delta) } fn child_storage_root<'a>( @@ -224,15 +234,15 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, delta: impl Iterator)>, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta) + self.state().child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() + self.state().pairs() } fn keys(&self, prefix: &[u8]) -> Vec> { - 
self.state.keys(prefix) + self.state().keys(prefix) } fn child_keys( @@ -240,21 +250,24 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(child_info, prefix) + self.state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - self.state.as_trie_backend() + fn from_previous_rec_state( + mut self, + previous: RecordBackendFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { + let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); + state.from_previous_rec_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &StateMachineStats) { - self.state.register_overlay_stats(stats); + self.state().register_overlay_stats(stats); } fn usage_info(&self) -> StateUsageInfo { - self.state.usage_info() + self.state().usage_info() } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 434b301ed6240..08213be8310c7 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; use sp_state_machine::{ - backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, + backend::{Backend as StateBackend, RecordBackendFor}, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; use log::trace; @@ -495,7 +495,9 @@ impl>, B: BlockT> CachingState { impl>, B: BlockT> StateBackend> for CachingState { type Error = S::Error; type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; + type StorageProof = S::StorageProof; + type RecProofBackend = S::RecProofBackend; + type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { let local_cache = self.cache.local_cache.upgradable_read(); 
@@ -652,8 +654,12 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.state.as_trie_backend() + fn from_previous_rec_state( + self, + previous: RecordBackendFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { + self.state.from_previous_rec_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { @@ -736,7 +742,9 @@ impl std::fmt::Debug for SyncingCachingState { impl>, B: BlockT> StateBackend> for SyncingCachingState { type Error = S::Error; type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; + type StorageProof = S::StorageProof; + type RecProofBackend = S::RecProofBackend; + type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.caching_state().storage(key) @@ -834,13 +842,6 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.caching_state - .as_mut() - .expect("`caching_state` is valid for the lifetime of the object; qed") - .as_trie_backend() - } - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { self.caching_state().register_overlay_stats(stats); } @@ -848,12 +849,26 @@ impl>, B: BlockT> StateBackend> for Syncin fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } + + fn from_previous_rec_state( + mut self, + previous: RecordBackendFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { + self.sync().and_then(|s| s.from_previous_rec_state(previous, previous_input)) + } } impl Drop for SyncingCachingState { fn drop(&mut self) { + let _ = self.sync(); + } +} + +impl SyncingCachingState { + fn sync(&mut self) -> Option> { if self.disable_syncing { - return; + return None; } if let Some(mut 
caching_state) = self.caching_state.take() { @@ -864,6 +879,9 @@ impl Drop for SyncingCachingState { let is_best = self.meta.read().best_hash == hash; caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best); } + Some(caching_state) + } else { + None } } } @@ -872,10 +890,13 @@ impl Drop for SyncingCachingState { mod tests { use super::*; use sp_runtime::{ - traits::BlakeTwo256, testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; - use sp_state_machine::InMemoryBackend; + + type InMemoryBackend = sp_state_machine::InMemoryBackend< + sp_runtime::traits::BlakeTwo256, + sp_state_machine::SimpleProof, + >; type Block = RawBlock>; @@ -897,7 +918,7 @@ mod tests { // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] // state [ 5 5 4 3 2 2 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -912,14 +933,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); @@ -934,7 +955,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1b), ); @@ -949,7 +970,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1a), ); @@ -964,35 +985,35 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); let s = CachingState::new( - 
InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1a), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1b), ); @@ -1001,7 +1022,7 @@ mod tests { // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1015,7 +1036,7 @@ mod tests { true, ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); @@ -1036,7 +1057,7 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -1051,14 +1072,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); @@ -1073,7 +1094,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1088,7 +1109,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); @@ -1108,21 +1129,21 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); let mut s = CachingState::new( - 
InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); @@ -1137,14 +1158,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1159,7 +1180,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); @@ -1173,7 +1194,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), + InMemoryBackend::default(), shared.clone(), Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1211,7 +1232,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -1255,7 +1276,7 @@ mod tests { let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent.clone()), ); @@ -1270,7 +1291,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); @@ -1285,7 +1306,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); @@ -1308,7 +1329,7 @@ mod tests { s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), 
shared.clone(), Some(h1), ); @@ -1323,11 +1344,12 @@ mod qc { use quickcheck::{quickcheck, TestResult, Arbitrary}; use super::*; - use sp_runtime::{ - traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, - }; - use sp_state_machine::InMemoryBackend; + use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + + type InMemoryBackend = sp_state_machine::InMemoryBackend< + sp_runtime::traits::BlakeTwo256, + sp_state_machine::SimpleProof, + >; type Block = RawBlock>; @@ -1454,22 +1476,22 @@ mod qc { } } - fn head_state(&self, hash: H256) -> CachingState, Block> { + fn head_state(&self, hash: H256) -> CachingState { CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(hash), ) } - fn canon_head_state(&self) -> CachingState, Block> { + fn canon_head_state(&self) -> CachingState { self.head_state(self.canon.last().expect("Expected to be one commit").hash) } fn mutate_static( &mut self, action: Action, - ) -> CachingState, Block> { + ) -> CachingState { self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") } @@ -1488,7 +1510,7 @@ mod qc { fn mutate( &mut self, action: Action, - ) -> Result, Block>, ()> { + ) -> Result, ()> { let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; @@ -1525,7 +1547,7 @@ mod qc { }; let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(parent), ); @@ -1564,7 +1586,7 @@ mod qc { } let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(parent_hash), ); @@ -1611,7 +1633,7 @@ mod qc { self.canon.push(node); let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(fork_at), ); diff --git a/client/finality-grandpa/src/finality_proof.rs 
b/client/finality-grandpa/src/finality_proof.rs index 2ac9ec57f3df4..1443b7b9a82d3 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -41,7 +41,7 @@ use log::{trace, warn}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::Backend, StorageProof, + backend::Backend, SimpleProof as StorageProof, light::{FetchChecker, RemoteReadRequest}, StorageProvider, ProofProvider, }; @@ -53,7 +53,8 @@ use sp_runtime::{ }; use sp_core::storage::StorageKey; use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY}; +use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, + GRANDPA_AUTHORITIES_KEY}; use crate::justification::GrandpaJustification; use crate::VoterSet; @@ -70,7 +71,8 @@ pub trait AuthoritySetForFinalityProver: Send + Sync { } /// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync +pub trait StorageAndProofProvider: StorageProvider + + ProofProvider + Send + Sync where Block: BlockT, BE: Backend + Send + Sync, @@ -81,13 +83,14 @@ impl StorageAndProofProvider for P where Block: BlockT, BE: Backend + Send + Sync, - P: StorageProvider + ProofProvider + Send + Sync, + P: StorageProvider + ProofProvider + Send + Sync, {} /// Implementation of AuthoritySetForFinalityProver. -impl AuthoritySetForFinalityProver for Arc> +impl AuthoritySetForFinalityProver for Arc> where BE: Backend + Send + Sync + 'static, + Block: BlockT, { fn authorities(&self, block: &BlockId) -> ClientResult { let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); @@ -114,7 +117,8 @@ pub trait AuthoritySetForFinalityChecker: Send + Sync { } /// FetchChecker-based implementation of AuthoritySetForFinalityChecker. 
-impl AuthoritySetForFinalityChecker for Arc> { +impl AuthoritySetForFinalityChecker for Arc> + where Block: BlockT { fn check_authorities_proof( &self, hash: Block::Hash, @@ -849,8 +853,8 @@ pub(crate) mod tests { _ => unreachable!("no other authorities should be fetched: {:?}", block_id), }, |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), + BlockId::Number(5) => Ok(StorageProof::from_nodes(vec![vec![50]])), + BlockId::Number(7) => Ok(StorageProof::from_nodes(vec![vec![70]])), _ => unreachable!("no other authorities should be proved: {:?}", block_id), }, ), @@ -866,14 +870,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![50]])), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::new(vec![vec![70]])), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![70]])), }, ]); @@ -886,8 +890,8 @@ pub(crate) mod tests { &blockchain, 0, auth3, - &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes().next().map(|x| x[0]) { + &ClosureAuthoritySetForFinalityChecker(|hash, _header, proof: StorageProof| + match proof.clone().into_nodes().into_iter().next().map(|x| x[0]) { Some(50) => Ok(auth5.clone()), Some(70) => Ok(auth7.clone()), _ => unreachable!("no other proofs should be checked: {}", hash), @@ -948,7 +952,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::new(vec![vec![42]])), + authorities_proof: 
Some(StorageProof::from_nodes(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -998,7 +1002,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![42]])), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index a7c9a655467c7..11528092c7575 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof, BlockBackend}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, SimpleProof, BlockBackend}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![])), + authorities_proof: Some(SimpleProof::from_nodes(vec![])), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d2905e4da4453..3f31fd71a481d 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -32,7 +32,7 @@ use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; -use sp_api::{ApiRef, StorageProof, 
ProvideRuntimeApi}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, @@ -53,6 +53,9 @@ use finality_proof::{ use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; +use sp_state_machine::SimpleProof; + +type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; type TestLinkHalf = LinkHalf>; @@ -238,9 +241,9 @@ impl AuthoritySetForFinalityProver for TestApi { Ok(self.genesis_authorities.clone()) } - fn prove_authorities(&self, block: &BlockId) -> Result { + fn prove_authorities(&self, block: &BlockId) -> Result { let authorities = self.authorities(block)?; - let backend = >>::from(vec![ + let backend = , SimpleProof>>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); let proof = prove_read(backend, vec![b"authorities"]) @@ -254,9 +257,9 @@ impl AuthoritySetForFinalityChecker for TestApi { &self, _hash: ::Hash, header: ::Header, - proof: StorageProof, + proof: SimpleProof, ) -> Result { - let results = read_proof_check::, _>( + let results = read_proof_check::>, HashFor, _>( *header.state_root(), proof, vec![b"authorities"] ) .expect("failure checking read proof for authorities"); diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index be7953e528bd8..0d82d5bd3ef8a 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, + backend::{Backend as StateBackend, RecordBackendFor}, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, 
Storage}; @@ -49,6 +49,7 @@ use sc_client_api::{ }; use super::blockchain::Blockchain; use hash_db::Hasher; +use super::InMemoryBackend; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; @@ -383,7 +384,9 @@ impl StateBackend for GenesisOrUnavailableState { type Error = ClientError; type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; + type StorageProof = as StateBackend>::StorageProof; + type RecProofBackend = as StateBackend>::RecProofBackend; + type ProofCheckBackend = as StateBackend>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> ClientResult>> { match *self { @@ -512,9 +515,14 @@ impl StateBackend for GenesisOrUnavailableState sp_state_machine::UsageInfo::empty() } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn from_previous_rec_state( + self, + previous: RecordBackendFor, + previous_input: sp_state_machine::ProofInput, + ) -> Option { match self { - GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), + GenesisOrUnavailableState::Genesis(state) => state + .from_previous_rec_state(previous, previous_input), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index fa0f02cd5aed9..34fc9da47b104 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -32,10 +32,13 @@ use sp_runtime::{ }; use sp_externalities::Extensions; use sp_state_machine::{ - self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, + self, OverlayedChanges, ExecutionStrategy, execution_proof_check_on_proof_backend, + ExecutionManager, }; +use super::InMemoryBackend; +use sp_state_machine::backend::{Backend as StateBackend, ProofRawFor}; use hash_db::Hasher; +use sp_state_machine::{SimpleProof as 
StorageProof, MergeableProof}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; @@ -118,7 +121,9 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - recorder: &Option>, + _recorder: Option<&RefCell< + ProofRecorder<>::State, Block> + >>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node @@ -145,7 +150,9 @@ impl CallExecutor for initialize_block, ExecutionManager::NativeWhenPossible, native_call, - recorder, + // we are not passing the recorder at it would invole some additional + // type constraint when the client do not support proving + None, extensions, ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), false => Err(ClientError::NotAvailableOnLightClient), @@ -159,13 +166,13 @@ impl CallExecutor for } } - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - _state: &sp_state_machine::TrieBackend>, - _changes: &mut OverlayedChanges, + _proof_backend: &P, + _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8], - ) -> ClientResult<(Vec, StorageProof)> { + ) -> ClientResult<(Vec, ProofRawFor>)> { Err(ClientError::NotAvailableOnLightClient) } @@ -179,18 +186,18 @@ impl CallExecutor for /// Method is executed using passed header as environment' current block. /// Proof includes both environment preparation proof and method execution proof. 
pub fn prove_execution( - mut state: S, + state: S, header: Block::Header, executor: &E, method: &str, call_data: &[u8], -) -> ClientResult<(Vec, StorageProof)> +) -> ClientResult<(Vec, ProofRawFor>)> where Block: BlockT, S: StateBackend>, E: CallExecutor, { - let trie_state = state.as_trie_backend() + let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box @@ -198,21 +205,23 @@ pub fn prove_execution( // prepare execution environment + record preparation proof let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - trie_state, + let (_, init_proof) = executor.prove_at_proof_backend_state( + &proof_state, &mut changes, "Core_initialize_block", &header.encode(), )?; // execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state( - &trie_state, + let (result, exec_proof) = executor.prove_at_proof_backend_state( + &proof_state, &mut changes, method, call_data, )?; - let total_proof = StorageProof::merge(vec![init_proof, exec_proof]); + let total_proof = >>::merge( + vec![init_proof, exec_proof], + ); Ok((result, total_proof)) } @@ -233,7 +242,8 @@ pub fn check_execution_proof( H: Hasher, H::Out: Ord + codec::Codec + 'static, { - check_execution_proof_with_make_header::( + + check_execution_proof_with_make_header::, Header, E, H, _>( executor, spawn_handle, request, @@ -252,14 +262,15 @@ pub fn check_execution_proof( /// /// Method is executed using passed header as environment' current block. /// Proof should include both environment preparation proof and method execution proof. -pub fn check_execution_proof_with_make_header( +pub fn check_execution_proof_with_make_header( executor: &E, spawn_handle: Box, request: &RemoteCallRequest
, - remote_proof: StorageProof, + remote_proof: P::StorageProof, make_next_header: MakeNextHeader, ) -> ClientResult> where + P: sp_state_machine::backend::ProofCheckBackend, E: CodeExecutor + Clone + 'static, H: Hasher, Header: HeaderT, @@ -271,14 +282,14 @@ pub fn check_execution_proof_with_make_header( // prepare execution environment + check preparation proof let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; + let trie_backend = P::create_proof_check_backend(root, remote_proof)?; let next_header = make_next_header(&request.header); // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); let runtime_code = backend_runtime_code.runtime_code()?; - execution_proof_check_on_trie_backend::( + execution_proof_check_on_proof_backend::( &trie_backend, &mut changes, executor, @@ -289,7 +300,7 @@ pub fn check_execution_proof_with_make_header( )?; // execute method - execution_proof_check_on_trie_backend::( + execution_proof_check_on_proof_backend::( &trie_backend, &mut changes, executor, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 33113c2fc7df0..9d74a646d09f0 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -34,7 +34,9 @@ use sp_state_machine::{ InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, read_child_proof_check, }; -pub use sp_state_machine::StorageProof; +use super::InMemoryBackend; +pub use sp_state_machine::{SimpleProof as StorageProof, ProofCommon}; +pub use sp_state_machine::BackendProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; pub use sc_client_api::{ @@ -161,7 +163,8 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); + let storage = 
remote_roots_proof.into_partial_db() + .map_err(|e| format!("{}", e))?; // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT @@ -205,7 +208,7 @@ impl> LightDataChecker { } } -impl FetchChecker for LightDataChecker +impl FetchChecker for LightDataChecker where Block: BlockT, E: CodeExecutor + Clone + 'static, @@ -235,7 +238,7 @@ impl FetchChecker for LightDataChecker request: &RemoteReadRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - read_proof_check::( + read_proof_check::, H, _>( convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), @@ -251,7 +254,7 @@ impl FetchChecker for LightDataChecker Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; - read_child_proof_check::( + read_child_proof_check::, H, _>( convert_hash(request.header.state_root()), remote_proof, &child_info, diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index 899d1ae31a3dd..e01e3a43e9f52 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -29,6 +29,8 @@ pub mod fetcher; pub use {backend::*, blockchain::*, call_executor::*, fetcher::*}; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + /// Create an instance of fetch data checker. pub fn new_fetch_checker>( blockchain: Arc>, diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 20fbe0284397d..2ec2dd941cdb6 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -19,17 +19,18 @@ //! Blockchain access trait use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{BlockBackend, ProofProvider, SimpleProof as StorageProof}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; /// Local client abstraction for the network. 
-pub trait Client: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync +pub trait Client: HeaderBackend + ProofProvider + + BlockIdTo + BlockBackend + HeaderMetadata + + Send + Sync {} impl Client for T where - T: HeaderBackend + ProofProvider + BlockIdTo + T: HeaderBackend + ProofProvider + BlockIdTo + BlockBackend + HeaderMetadata + Send + Sync {} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 678a717a898ff..27c1262f12016 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -57,7 +57,8 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, + SimpleProof as StorageProof, + ProofCommon, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -290,7 +291,7 @@ pub struct LightClientHandler { /// Blockchain client. chain: Arc>, /// Verifies that received responses are correct. - checker: Arc>, + checker: Arc>, /// Peer information (addresses, their best block, etc.) peers: HashMap>, /// Futures sending back response to remote clients. 
@@ -313,7 +314,7 @@ where pub fn new( cfg: Config, chain: Arc>, - checker: Arc>, + checker: Arc>, peerset: sc_peerset::PeersetHandle, ) -> Self { LightClientHandler { @@ -546,7 +547,11 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { + let proof = match self.chain.execution_proof( + &BlockId::Hash(block), + &request.method, + &request.data, + ) { Ok((_, proof)) => proof, Err(e) => { log::trace!("remote call request from {} ({} at {:?}) failed with: {}", @@ -585,7 +590,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { + let proof = match self.chain.read_proof( + &BlockId::Hash(block), + &mut request.keys.iter().map(AsRef::as_ref), + ) { Ok(proof) => proof, Err(error) => { log::trace!("remote read request from {} ({} at {:?}) failed with: {}", @@ -1323,7 +1331,8 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; + use sc_client_api::{ProofCommon, RemoteReadChildRequest, FetchChecker, + SimpleProof as StorageProof}; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ @@ -1367,7 +1376,7 @@ mod tests { _mark: std::marker::PhantomData } - impl light::FetchChecker for DummyFetchChecker { + impl light::FetchChecker for DummyFetchChecker { fn check_header_proof( &self, _request: &RemoteHeaderRequest, diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 084172ee57c4f..baa555f38d9ac 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -24,7 +24,8 @@ use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; use sc_client_api::{ FetchChecker, Fetcher, RemoteBodyRequest, 
RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, + RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, SimpleProof as StorageProof, + ChangesProof, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::Error as ClientError; @@ -38,7 +39,7 @@ use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; /// responsible for pulling elements out of that queue and fulfilling them. pub struct OnDemand { /// Objects that checks whether what has been retrieved is correct. - checker: Arc>, + checker: Arc>, /// Queue of requests. Set to `Some` at initialization, then extracted by the network. /// @@ -58,7 +59,7 @@ pub struct OnDemand { #[derive(Default, Clone)] pub struct AlwaysBadChecker; -impl FetchChecker for AlwaysBadChecker { +impl FetchChecker for AlwaysBadChecker { fn check_header_proof( &self, _request: &RemoteHeaderRequest, @@ -114,7 +115,7 @@ where B::Header: HeaderT, { /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { + pub fn new(checker: Arc>) -> Self { let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); let requests_queue = Mutex::new(Some(requests_queue)); @@ -126,7 +127,7 @@ where } /// Get checker reference. 
- pub fn checker(&self) -> &Arc> { + pub fn checker(&self) -> &Arc> { &self.checker } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c1c9ef02ea60b..07806beb197d1 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -59,7 +59,7 @@ use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -use sc_client_api::{ChangesProof, StorageProof}; +use sc_client_api::{ChangesProof, ProofCommon}; use wasm_timer::Instant; mod generic_proto; @@ -1422,7 +1422,7 @@ impl Protocol { error ); self.peerset_handle.report_peer(who.clone(), rep::RPC_FAILED); - StorageProof::empty() + ProofCommon::empty() } }; @@ -1431,7 +1431,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof, + proof: proof.into_nodes(), }), ); } @@ -1562,7 +1562,7 @@ impl Protocol { request.block, error ); - StorageProof::empty() + ProofCommon::empty() } }; self.send_message( @@ -1570,7 +1570,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.into_nodes(), }), ); } @@ -1622,7 +1622,7 @@ impl Protocol { request.block, error ); - StorageProof::empty() + ProofCommon::empty() } }; self.send_message( @@ -1630,7 +1630,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.into_nodes(), }), ); } @@ -1655,7 +1655,7 @@ impl Protocol { request.block, error ); - (Default::default(), StorageProof::empty()) + (Default::default(), ProofCommon::empty()) } }; self.send_message( @@ -1664,7 +1664,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof, + proof: proof.into_nodes(), }), ); } @@ -1719,7 +1719,7 @@ impl Protocol { max_block: Zero::zero(), proof: vec![], roots: BTreeMap::new(), 
- roots_proof: StorageProof::empty(), + roots_proof: ProofCommon::empty(), } } }; @@ -1731,7 +1731,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, + roots_proof: proof.roots_proof.into_nodes(), }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index a7fbb92387cf6..bd467b44db2f6 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -28,7 +28,10 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -use sc_client_api::StorageProof; + +/// Former storage proof type, to be replace by +/// `use sc_client_api::StorageProof`; +type StorageProof = Vec>; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 01c7c5f1eb40c..2230c0b0fda33 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -40,7 +40,8 @@ use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; pub use sc_rpc_api::child_state::*; -use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, ProofProvider}; +use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, + ProofProvider, SimpleProof}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; @@ -175,7 +176,8 @@ pub fn new_full( where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index fda73cea27110..aad5c5be4e1e2 100644 --- a/client/rpc/src/state/state_full.rs +++ 
b/client/rpc/src/state/state_full.rs @@ -26,7 +26,8 @@ use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::r use sc_rpc_api::state::ReadProof; use sc_client_api::backend::Backend; -use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; +use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, + CachedHeaderMetadata, HeaderBackend}; use sc_client_api::BlockchainEvents; use sp_core::{ Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, @@ -36,10 +37,9 @@ use sp_version::RuntimeVersion; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, }; - use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err, SimpleProof}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider}; @@ -219,7 +219,8 @@ impl FullState impl StateBackend for FullState where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, @@ -394,7 +395,9 @@ impl StateBackend for FullState( ) -> Result where TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + + BlockBackend + BlockIdTo + ProofProvider + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + StorageProvider + CallApiAt + Send + 'static, @@ -460,7 +461,9 @@ pub fn spawn_tasks( TBackend: 'static + sc_client_api::backend::Backend + Send, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, - TRpc: 
sc_rpc::RpcExtension + TRpc: sc_rpc::RpcExtension, + // This constraint should be lifted when client get generic over StateBackend and Proof + TBackend::State: StateBackend, StorageProof = SimpleProof>, { let SpawnTasksParams { mut config, @@ -715,7 +718,7 @@ fn gen_handler( TBl: BlockT, TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + HeaderMetadata + ExecutorProvider + - CallApiAt + ProofProvider + + CallApiAt + ProofProvider + StorageProvider + BlockBackend + Send + Sync + 'static, TExPool: MaintainedTransactionPool::Hash> + 'static, TBackend: sc_client_api::backend::Backend + 'static, @@ -723,6 +726,8 @@ fn gen_handler( >::Api: sp_session::SessionKeys + sp_api::Metadata, + // This constraint should be lifted when client get generic over StateBackend and Proof + TBackend::State: StateBackend, StorageProof = SimpleProof>, { use sc_rpc::{chain, state, author, system, offchain}; @@ -829,7 +834,7 @@ pub fn build_network( where TBl: BlockT, TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + + BlockBackend + BlockIdTo + ProofProvider + HeaderBackend + BlockchainEvents + 'static, TExPool: MaintainedTransactionPool::Hash> + 'static, TImpQu: ImportQueue + 'static, diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 3fe44dbdb142d..9809e437ee2ee 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -17,7 +17,8 @@ use crate::error::Error; use sp_runtime::traits::Block as BlockT; use sp_runtime::generic::BlockId; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; +use sp_core::storage::{StorageKey, ChildInfo, Storage, StorageChild, StorageMap, + ChildType, PrefixedStorageKey}; use sc_client_api::{StorageProvider, UsageProvider}; use std::{collections::HashMap, sync::Arc}; @@ -43,16 +44,16 @@ where // Remove all default child storage roots 
from the top storage and collect the child storage // pairs. - while let Some(pos) = top_storage - .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { - let (key, _) = top_storage.swap_remove(pos); - - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); + while let Some((pos, child_type, unprefixed_key)) = top_storage + .iter().enumerate() + .find_map(|(i, (k, _))| ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(&k.0)) + .map(|(t, k)| (i, t, k))) { + debug_assert!(child_type == ChildType::ParentKeyId); + let key = StorageKey(unprefixed_key.to_vec()); let child_info = ChildInfo::new_default(&key.0); + let (key, _) = top_storage.swap_remove(pos); + let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; let mut pairs = StorageMap::new(); keys.into_iter().try_for_each(|k| { diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 1919c76ff489b..633d8ba4573df 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -23,7 +23,7 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, + backend::{Backend as _, ProofRawFor}, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; @@ -143,7 +143,9 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: &Option>, + recorder: Option<&RefCell< + ProofRecorder<>::State, Block> + >>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -158,43 +160,50 @@ where let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = 
storage_transaction_cache.map(|c| c.borrow_mut()); - let mut state = self.backend.state_at(*at)?; + let state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); let offchain_changes = &mut *offchain_changes.borrow_mut(); match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); + let ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); // It is important to extract the runtime code here before we create the proof // recorder. let runtime_code = state_runtime_code.runtime_code()?; - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - ); + let state = self.backend.state_at(*at)?; - let mut state_machine = StateMachine::new( - &backend, - changes_trie_state, - changes, - offchain_changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ); - // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + let backend = state.from_previous_rec_state( + std::mem::replace(recorder, Default::default()), + std::mem::replace(input, Default::default()), + ).ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box)?; + + let result = { + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ); + // TODO: 
https://github.com/paritytech/substrate/issues/4455 + // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + }; + use sp_state_machine::backend::RecProofBackend; + let (recorder_state, input_state) = backend.extract_recorder(); + *recorder = recorder_state; + *input = input_state; + result }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -238,21 +247,21 @@ where .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( - trie_state, + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error> { + sp_state_machine::prove_execution_on_proof_backend::<_, _, NumberFor, _, _>( + proof_backend, overlay, &self.executor, self.spawn_handle.clone(), method, call_data, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + &sp_state_machine::backend::BackendRuntimeCode::new(proof_backend).runtime_code()?, ) .map_err(Into::into) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d0859f4ee0392..7b7f8a97a66cc 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -18,6 +18,7 @@ //! 
Substrate Client +use sc_client_api::backend::{ProofFor, ProofRawFor}; use std::{ marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, @@ -42,9 +43,10 @@ use sp_runtime::{ }, }; use sp_state_machine::{ - DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, + DBValue, backend::Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, - ChangesTrieConfigurationRange, key_changes, key_changes_proof, + ChangesTrieConfigurationRange, key_changes, key_changes_proof, SimpleProof as StorageProof, + MergeableProof, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -58,7 +60,6 @@ use sp_blockchain::{ well_known_cache_keys::Id as CacheKeyId, HeaderMetadata, CachedHeaderMetadata, }; -use sp_trie::StorageProof; use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, CallApiAtParams, @@ -517,7 +518,7 @@ impl Client where Ok(()) }, ())?; - Ok(StorageProof::merge(proofs)) + Ok(StorageProof::merge(proofs).into()) } /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). 
@@ -1178,8 +1179,9 @@ impl UsageProvider for Client where } } -impl ProofProvider for Client where +impl ProofProvider> for Client where B: backend::Backend, +// HashFor: Ord + Codec, E: CallExecutor, Block: BlockT, { @@ -1187,7 +1189,7 @@ impl ProofProvider for Client where &self, id: &BlockId, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_read(state, keys) .map_err(Into::into)) @@ -1198,7 +1200,7 @@ impl ProofProvider for Client where id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) @@ -1208,8 +1210,8 @@ impl ProofProvider for Client where &self, id: &BlockId, method: &str, - call_data: &[u8] - ) -> sp_blockchain::Result<(Vec, StorageProof)> { + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, ProofRawFor>)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. 
// @@ -1227,8 +1229,10 @@ impl ProofProvider for Client where &self.executor, method, call_data, - ).map(|(r, p)| { - (r, StorageProof::merge(vec![p, code_proof])) + ).and_then(|(r, p)| { + Ok((r, ProofRawFor::>::merge( + vec![p, code_proof], + ))) }) } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index ffc84ad47b8f3..a3d6f003a499f 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -43,9 +43,10 @@ use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; use sc_client_api::{ blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, - AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, + AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, + SimpleProof as StorageProof, ProofCommon, }; use sp_externalities::Extensions; use sc_block_builder::BlockBuilderProvider; @@ -62,8 +63,11 @@ use substrate_test_runtime_client::{ AccountKeyring, runtime::{self, Extrinsic}, }; -use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; -use sp_state_machine::Backend as _; +use sp_core::{blake2_256, ChangesTrieConfiguration}; +use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; +use sp_state_machine::backend::{ProofRawFor, Backend as _}; + +type InMemoryProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; pub type DummyBlockchain = Blockchain; @@ -231,7 +235,9 @@ impl CallExecutor for DummyCallExecutor { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: &Option>, + _proof_recorder: Option<&RefCell< 
+ ProofRecorder<>::State, Block> + >>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() @@ -241,13 +247,13 @@ impl CallExecutor for DummyCallExecutor { unreachable!() } - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - _trie_state: &sp_state_machine::TrieBackend>, + _proof_backend: &P, _overlay: &mut OverlayedChanges, _method: &str, - _call_data: &[u8] - ) -> Result<(Vec, StorageProof), ClientError> { + _call_data: &[u8], + ) -> Result<(Vec, ProofRawFor>), ClientError> { unreachable!() } @@ -311,7 +317,7 @@ fn execution_proof_is_generated_and_checked() { let (remote_result, remote_execution_proof) = remote_client.execution_proof( &remote_block_id, method, - &[] + &[], ).unwrap(); // check remote execution proof locally @@ -339,11 +345,11 @@ fn execution_proof_is_generated_and_checked() { let (_, remote_execution_proof) = remote_client.execution_proof( &remote_block_id, method, - &[] + &[], ).unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + let execution_result = check_execution_proof_with_make_header::( &local_executor(), Box::new(TaskExecutor::new()), &RemoteCallRequest { @@ -575,12 +581,17 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap() + .remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], + heap_pages as u8, + ); } #[test] @@ -592,7 +603,7 @@ fn storage_child_read_proof_is_generated_and_checked() { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, @@ -607,18 +618,19 @@ fn storage_child_read_proof_is_generated_and_checked() { #[test] fn header_proof_is_generated_and_checked() { let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); + assert_eq!((&local_checker as &dyn FetchChecker) + .check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); } #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: Default::default(), block: 1, retry_count: None, @@ -629,7 +641,7 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { fn check_header_proof_fails_if_invalid_header_provided() { let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, block: 1, retry_count: None, @@ -644,7 +656,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { local_executor(), Box::new(TaskExecutor::new()), ); - let local_checker = &local_checker as &dyn FetchChecker; + let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; let max_hash = remote_client.chain_info().best_hash; @@ -686,7 +698,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { match local_result == expected_result { true => (), false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result)), + index, local_result, expected_result)), } } } @@ -754,7 +766,7 @@ fn check_changes_proof_fails_if_proof_is_wrong() { local_executor(), Box::new(TaskExecutor::new()), ); - let local_checker = &local_checker as &dyn FetchChecker; + let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; let max_hash = remote_client.chain_info().best_hash; @@ -843,7 +855,7 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { Box::new(TaskExecutor::new()), ); assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); + remote_proof.roots_proof.clone()).is_err()); // fails when proof is broken let mut local_storage = DummyStorage::new(); diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 8d073df272fd9..5c2adbf0f683a 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -18,7 +18,7 @@ use parity_scale_codec::{Encode, Decode, Joiner}; use sc_executor::native_executor_instance; -use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; +use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy}; use substrate_test_runtime_client::{ prelude::*, runtime::{ @@ -55,6 
+55,8 @@ use hex_literal::hex; mod light; mod db; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + native_executor_instance!( Executor, substrate_test_runtime_client::runtime::api::dispatch, @@ -141,7 +143,7 @@ pub fn prepare_client_with_key_changes() -> ( } fn construct_block( - backend: &InMemoryBackend, + backend: &InMemoryBackend, number: BlockNumber, parent_hash: Hash, state_root: Hash, @@ -217,7 +219,7 @@ fn construct_block( (vec![].and(&Block { header, extrinsics: transactions }), hash) } -fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { construct_block( backend, 1, diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 8294c8bfbd684..fed61099012e1 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -416,7 +416,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, - recorder: &Option<#crate_::ProofRecorder>, + recorder: Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 85f5a1797b1e3..5955f6d2b30e4 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -212,7 +212,7 @@ fn generate_runtime_api_base_structures() -> Result { storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, - recorder: Option<#crate_::ProofRecorder>, + recorder: Option>>, } // `RuntimeApi` itself is not threadsafe. 
However, an instance is only available in a @@ -283,18 +283,25 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self) { - self.recorder = Some(Default::default()); + self.recorder = Some(std::cell::RefCell::new(Default::default())); } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::ProofRawFor>> { + use #crate_::RecordableProof; self.recorder .take() - .map(|recorder| { - let trie_nodes = recorder.read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - #crate_::StorageProof::new(trie_nodes) + .and_then(|recorder| { + let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); + let input = std::mem::replace(input, #crate_::ProofInput::None); + < + >>::RecProofBackend + as #crate_::RecProofBackend<#crate_::HashFor> + >::extract_proof_rec( + &recorder, + input, + ).ok() }) } @@ -339,7 +346,7 @@ fn generate_runtime_api_base_structures() -> Result { initialized_block: None.into(), changes: Default::default(), offchain_changes: Default::default(), - recorder: Default::default(), + recorder: None, storage_transaction_cache: Default::default(), }.into() } @@ -360,7 +367,7 @@ fn generate_runtime_api_base_structures() -> Result { &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, - &Option<#crate_::ProofRecorder>, + Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, >( @@ -377,7 +384,7 @@ fn generate_runtime_api_base_structures() -> Result { &self.offchain_changes, &self.storage_transaction_cache, &self.initialized_block, - &self.recorder, + self.recorder.as_ref(), ); self.commit_or_rollback(res.is_ok()); diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 0e8f18e3e6f14..e72540e65a431 100644 --- 
a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -71,7 +71,10 @@ fn implement_common_api_traits( } impl #crate_::ApiExt<#block_type> for #self_ty { - type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; + type StateBackend = #crate_::InMemoryBackend< + #crate_::HashFor<#block_type>, + #crate_::SimpleProof, + >; fn execute_in_transaction #crate_::TransactionOutcome, R>( &self, @@ -99,7 +102,9 @@ fn implement_common_api_traits( unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::ProofRawFor>> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index bad6c03058322..6b67f76146892 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -39,7 +39,8 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, InMemoryBackend, + OverlayedChanges, ProofCommon, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, + ProofInput, backend::{ProofRawFor, RecProofBackend}, RecordableProof, SimpleProof, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -70,7 +71,9 @@ pub use sp_std::{slice, mem}; use sp_std::result; #[doc(hidden)] pub use codec::{Encode, Decode, DecodeLimit}; +#[doc(hidden)] use sp_core::OpaqueMetadata; +#[doc(hidden)] #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; @@ -303,10 +306,6 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// ``` pub use sp_api_proc_macro::mock_impl_runtime_apis; -/// A type that records all accessed trie nodes and generates a proof out of it. 
-#[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder>; - /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] pub type StorageTransactionCache = @@ -314,6 +313,7 @@ pub type StorageTransactionCache = >>::Transaction, HashFor, NumberFor >; +/// A type containing storage changes. #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges< @@ -379,7 +379,7 @@ pub trait ApiExt: ApiErrorExt { pred: P, ) -> Result where Self: Sized; - /// Start recording all accessed trie nodes for generating proofs. + /// Start record a proof. fn record_proof(&mut self); /// Extract the recorded proof. @@ -387,7 +387,7 @@ pub trait ApiExt: ApiErrorExt { /// This stops the proof recording. /// /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self) -> Option; + fn extract_proof(&mut self) -> Option>>; /// Convert the api object into the storage changes that were done while executing runtime /// api functions. @@ -449,7 +449,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>, + pub recorder: Option<&'a RefCell>>, } /// Something that can call into the an api at a given block. @@ -527,6 +527,29 @@ pub trait RuntimeApiInfo { const VERSION: u32; } +/// A type that records all accessed trie nodes and generates a proof out of it. +#[cfg(feature = "std")] +pub struct ProofRecorder>, Block: BlockT> { + /// The recorder to use over the db use by trie db. + pub recorder: sp_state_machine::backend::RecordBackendFor>, + /// The additional input needed for the proof. + pub input: ProofInput, +} + +#[cfg(feature = "std")] +impl Default for ProofRecorder + where + Backend: StateBackend>, + Block: BlockT, +{ + fn default() -> Self { + ProofRecorder { + recorder: Default::default(), + input: ProofInput::None, + } + } +} + /// Extracts the `Api::Error` for a type that provides a runtime api. 
#[cfg(feature = "std")] pub type ApiErrorFor = <>::Api as ApiErrorExt>::Error; diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index d72872959cefa..71e53f883b33b 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -24,7 +24,7 @@ use substrate_test_runtime_client::{ use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; use sp_state_machine::{ ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, + execution_proof_check_on_proof_backend, }; use sp_consensus::SelectChain; @@ -185,7 +185,7 @@ fn record_proof_works() { builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::>( + let backend = create_proof_check_backend::, sp_state_machine::SimpleProof>( storage_root, proof.expect("Proof was generated"), ).expect("Creates proof backend."); @@ -197,7 +197,7 @@ fn record_proof_works() { None, 8, ); - execution_proof_check_on_trie_backend::<_, u64, _, _>( + execution_proof_check_on_proof_backend::<_, _, u64, _, _>( &backend, &mut overlay, &executor, diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 04b65a723e4a8..349513eb905eb 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -36,7 +36,6 @@ use sp_runtime::{ }; use futures::prelude::*; pub use sp_inherents::InherentData; - pub mod block_validation; pub mod offline_tracker; pub mod error; @@ -55,7 +54,7 @@ pub use block_import::{ ImportResult, JustificationImport, FinalityProofImport, }; pub use select_chain::SelectChain; -pub use sp_state_machine::Backend as StateBackend; +pub use sp_state_machine::backend::Backend as StateBackend; pub use import_queue::DefaultImportQueue; /// Block status. @@ -95,7 +94,7 @@ pub struct Proposal { /// The block that was build. 
pub block: Block, /// Optional proof that was recorded while building the block. - pub proof: Option, + pub encoded_proof: Option>, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9ec03c4d1e249..f0b74879db134 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,25 +20,37 @@ use hash_db::Hasher; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; -use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, -}; +use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; +use sp_trie::{ProofInput, BackendProof}; + +/// Access the state of the recording proof backend of a backend. +pub type RecordBackendFor = sp_trie::RecordBackendFor<>::StorageProof, H>; + +/// Access the raw proof of a backend. +pub type ProofRawFor = <>::StorageProof as BackendProof>::ProofRaw; + +/// Access the proof of a backend. +pub type ProofFor = >::StorageProof; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. -pub trait Backend: std::fmt::Debug { +pub trait Backend: Sized + std::fmt::Debug { /// An error type when fetching data is not possible. type Error: super::Error; /// Storage changes to be applied if committing type Transaction: Consolidate + Default + Send; - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; + /// Proof to use with this backend. + type StorageProof: BackendProof; + + /// Associated backend for recording proof. + type RecProofBackend: RecProofBackend; + + /// Associated backend for using a proof. 
+ type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result, Self::Error>; @@ -153,11 +165,19 @@ pub trait Backend: std::fmt::Debug { all } - /// Try convert into trie backend. - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - None + /// Try convert into a recording proof backend. + fn as_proof_backend(self) -> Option { + self.from_previous_rec_state(Default::default(), Default::default()) } + /// Try convert into a recording proof backend from previous recording state. + /// Using a previous proof backend avoids a costier merge of proof later. + fn from_previous_rec_state( + self, + previous: RecordBackendFor, + previous_input: ProofInput, + ) -> Option; + /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. @@ -232,10 +252,63 @@ pub trait Backend: std::fmt::Debug { } } -impl<'a, T: Backend, H: Hasher> Backend for &'a T { +/// Backend that can be instantiated from intital content. +pub trait GenesisStateBackend: Backend + where + H: Hasher, +{ + /// Instantiation method. + fn new(storage: sp_core::storage::Storage) -> Self; +} + +/// Backend used to record a proof. +pub trait RecProofBackend: crate::backend::Backend + where + H: Hasher, +{ + /// Extract proof after running operation to prove. + /// The proof extracted is raw and can be merge before + /// being converted into final proof format. + fn extract_proof(&self) -> Result, Box>; + + /// Extract current recording state. + fn extract_recorder(self) -> (RecordBackendFor, ProofInput); + + /// Extract proof from a recording state. 
+ fn extract_proof_rec( + recorder_state: &RecordBackendFor, + input: ProofInput, + ) -> Result, Box> { + use sp_trie::RecordableProof; + <>::ProofRaw>::extract_proof( + recorder_state, + input, + ).map_err(|e| Box::new(e) as Box) + } +} + +/// Backend used to run a proof. +pub trait ProofCheckBackend: Sized + crate::backend::Backend + where + H: Hasher, +{ + /// Instantiate backend from proof. + fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result>; +} + +impl<'a, T, H> Backend for &'a T + where + H: Hasher, + T: Backend, +{ type Error = T::Error; type Transaction = T::Transaction; - type TrieBackendStorage = T::TrieBackendStorage; + type StorageProof = T::StorageProof; + type RecProofBackend = T::RecProofBackend; + type ProofCheckBackend = T::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { (*self).storage(key) @@ -310,6 +383,15 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn usage_info(&self) -> UsageInfo { (*self).usage_info() } + + fn from_previous_rec_state( + self, + _previous: RecordBackendFor, + _input: ProofInput, + ) -> Option { + // Cannot move out of reference, consider cloning if needed. + None + } } /// Trait that allows consolidate two transactions together. 
diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 675904578be97..e2c946f35d0ba 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -53,7 +53,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( where B: Backend, H: Hasher + 'a, - H::Out: Encode, + H::Out: Decode + Encode, Number: BlockNumber, { let number = parent.number.clone() + One::one(); @@ -197,7 +197,7 @@ fn prepare_digest_input<'a, H, Number>( ), String> where H: Hasher, - H::Out: 'a + Encode, + H::Out: 'a + Decode + Encode, Number: BlockNumber, { let build_skewed_digest = config.end.as_ref() == Some(&block); @@ -270,6 +270,7 @@ fn prepare_digest_input<'a, H, Number>( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, + None, ); trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| @@ -302,6 +303,7 @@ fn prepare_digest_input<'a, H, Number>( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, + None, ); trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { @@ -326,20 +328,20 @@ fn prepare_digest_input<'a, H, Number>( #[cfg(test)] mod test { use sp_core::Blake2Hasher; - use crate::InMemoryBackend; + use crate::{InMemoryBackend, SimpleProof}; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use super::*; fn prepare_for_build(zero: u64) -> ( - InMemoryBackend, + InMemoryBackend, InMemoryStorage, OverlayedChanges, Configuration, ) { let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ + let backend: 
InMemoryBackend<_, _> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), (vec![102], vec![255]), diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index f9398b3ce5dd4..c183c840a13ae 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -131,7 +131,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8] -) -> Result, String> where H::Out: Encode { +) -> Result, String> where H::Out: Decode + Encode { key_changes_proof_check_with_db( config, roots_storage, @@ -154,7 +154,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8] -) -> Result, String> where H::Out: Encode { +) -> Result, String> where H::Out: Decode + Encode { // we can't query any roots before root let max = std::cmp::min(max, end.number.clone()); @@ -319,13 +319,13 @@ pub struct DrilldownIterator<'a, H, Number> } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> - where H::Out: Encode + where H::Out: Decode + Encode { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root, None).storage(key)) } } @@ -369,7 +369,7 @@ impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> .expect("only fails when already borrowed; storage() is non-reentrant; qed"); self.essence.next(|storage, root, key| ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root, None), 
proof_recorder, }.storage(key)) } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 04322f1d5930c..028386364f2e2 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -72,7 +72,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; -use sp_core::storage::PrefixedStorageKey; +use sp_core::storage::{PrefixedStorageKey, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -167,10 +167,19 @@ pub trait Storage: RootsStorage { /// Changes trie storage -> trie backend essence adapter. pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H, N> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> + where + H: Hasher, + N: BlockNumber, +{ type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { self.0.get(key, prefix) } } @@ -231,7 +240,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( panic_on_storage_error: bool, ) -> Result, H::Out, CacheAction)>, ()> where - H::Out: Ord + 'static + Encode, + H::Out: Ord + 'static + Decode + Encode, { /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. 
fn maybe_panic( diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 54456f97add1f..7727c42ea274a 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -63,6 +63,7 @@ pub fn prune( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), root, + None, ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); @@ -98,7 +99,7 @@ fn prune_trie( let mut proof_recorder: Recorder = Default::default(); { let mut trie = ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root, None), proof_recorder: &mut proof_recorder, }; trie.record_all_keys(); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 51b7ff6f50f71..b858996736d89 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use sp_core::storage::PrefixedStorageKey; +use sp_core::storage::{PrefixedStorageKey, ChildInfo}; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -190,7 +190,7 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) } } @@ -207,7 +207,12 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { 
self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d7d4bc145eb06..a5141ae873291 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -712,7 +712,7 @@ mod tests { }, InMemoryBackend, }; - type TestBackend = InMemoryBackend; + type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; fn prepare_overlay_with_changes() -> OverlayedChanges { diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f211f60202730..f190db79cee2b 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -59,7 +59,15 @@ where } /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> +pub fn new_in_mem() -> TrieBackend, H, sp_trie::SimpleProof> +where + H::Out: Codec + Ord, +{ + new_in_mem_proof::() +} + +/// Create a new empty instance of in-memory backend, for a parameterized proof type. 
+pub fn new_in_mem_proof() -> TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -69,7 +77,7 @@ where backend } -impl TrieBackend, H> +impl TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -133,7 +141,7 @@ where } } -impl Clone for TrieBackend, H> +impl Clone for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -142,28 +150,28 @@ where } } -impl Default for TrieBackend, H> +impl Default for TrieBackend, H, P> where H::Out: Codec + Ord, { fn default() -> Self { - new_in_mem() + new_in_mem_proof() } } -impl From, BTreeMap>> - for TrieBackend, H> +impl From, BTreeMap>> + for TrieBackend, H, P> where H::Out: Codec + Ord, { fn from(inner: HashMap, BTreeMap>) -> Self { - let mut backend = new_in_mem(); + let mut backend = new_in_mem_proof(); backend.insert(inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect()))); backend } } -impl From for TrieBackend, H> +impl From for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -175,7 +183,7 @@ where } } -impl From> for TrieBackend, H> +impl From> for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -186,8 +194,8 @@ where } } -impl From, StorageCollection)>> - for TrieBackend, H> +impl From, StorageCollection)>> + for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -220,13 +228,13 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let mut storage = storage.update( + let storage = storage.update( vec![( Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); - let trie_backend = storage.as_trie_backend().unwrap(); + let trie_backend = storage.as_proof_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ee0980f59b926..482f31967603c 100644 --- a/primitives/state-machine/src/lib.rs +++ 
b/primitives/state-machine/src/lib.rs @@ -44,12 +44,14 @@ mod trie_backend_essence; mod stats; mod read_only; -pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; +pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, + TrieNodesStorageProof, ProofCommon, StorageProofKind, ChildrenProofMap, + ProofInput, ProofInputKind, ProofNodes, RecordableProof, + SimpleProof, CompactProof, BackendProof, MergeableProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; pub use ext::Ext; -pub use backend::Backend; pub use changes_trie::{ AnchorBlockId as ChangesTrieAnchorBlockId, State as ChangesTrieState, @@ -69,15 +71,16 @@ pub use overlayed_changes::{ OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; -pub use proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, -}; +pub use proving_backend::{ProvingBackend, ProvingBackendRecorder, + create_proof_check_backend, create_full_proof_check_backend}; pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; +use backend::{Backend, RecProofBackend, ProofCheckBackend, ProofRawFor}; + const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions are protected from being closed by the runtime. qed"; @@ -94,7 +97,13 @@ pub type ChangesTrieTransaction = ( ); /// Trie backend with in-memory storage. -pub type InMemoryBackend = TrieBackend, H>; +pub type InMemoryBackend = TrieBackend, H, P>; + +/// Proof check trie backend with in-memory storage. 
+pub type InMemoryProofCheckBackend = TrieBackend, H, P>; + +/// Proof check trie backend with in-memory storage using separate child backends. +pub type InMemoryFullProofCheckBackend = TrieBackend>, H, P>; /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -463,14 +472,14 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where /// Prove execution using the given state backend, overlayed changes, and call executor. pub fn prove_execution( - mut backend: B, + backend: B, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> +) -> Result<(Vec, ProofRawFor), Box> where B: Backend, H: Hasher, @@ -479,10 +488,10 @@ where N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend.as_trie_backend() + let proof_backend = backend.as_proof_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _, _>( - trie_backend, + prove_execution_on_proof_backend::<_, _, N, _, _>( + &proof_backend, overlay, exec, spawn_handle, @@ -501,17 +510,17 @@ where /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. 
-pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_execution_on_proof_backend( + proving_backend: &P, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> +) -> Result<(Vec, ProofRawFor), Box> where - S: trie_backend_essence::TrieBackendStorage, + P: RecProofBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, @@ -519,9 +528,8 @@ where Spawn: SpawnNamed + Send + 'static, { let mut offchain_overlay = OffchainOverlayedChanges::default(); - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, + proving_backend, None, overlay, &mut offchain_overlay, @@ -537,14 +545,14 @@ where always_wasm(), None, )?; - let proof = sm.backend.extract_proof(); + let proof = sm.backend.extract_proof()?; Ok((result.into_encoded(), proof)) } /// Check execution proof, generated by `prove_execution` call. -pub fn execution_proof_check( +pub fn execution_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Spawn, @@ -553,14 +561,15 @@ pub fn execution_proof_check( runtime_code: &RuntimeCode, ) -> Result, Box> where + P: ProofCheckBackend, H: Hasher, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _, _>( + let trie_backend = P::create_proof_check_backend(root.into(), proof)?; + execution_proof_check_on_proof_backend::( &trie_backend, overlay, exec, @@ -572,8 +581,8 @@ where } /// Check execution proof on proving backend, generated by `prove_execution` call. 
-pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, +pub fn execution_proof_check_on_proof_backend( + proof_backend: &B, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Spawn, @@ -582,6 +591,7 @@ pub fn execution_proof_check_on_trie_backend( runtime_code: &RuntimeCode, ) -> Result, Box> where + B: Backend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, @@ -590,7 +600,7 @@ where { let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, + proof_backend, None, overlay, &mut offchain_overlay, @@ -610,9 +620,28 @@ where /// Generate storage read proof. pub fn prove_read( - mut backend: B, + backend: B, + keys: I, +) -> Result, Box> +where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, +{ + let proof_backend = backend.as_proof_backend() + .ok_or_else( + || Box::new(ExecutionError::UnableToGenerateProof) as Box + )?; + prove_read_on_proof_backend(&proof_backend, keys) +} + +/// Generate storage read proof for query plan verification. +pub fn prove_read_for_query_plan_check( + backend: B, keys: I, -) -> Result> +) -> Result<(crate::backend::RecordBackendFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -620,19 +649,24 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let proof_backend = backend.as_proof_backend() .ok_or_else( || Box::new(ExecutionError::UnableToGenerateProof) as Box )?; - prove_read_on_trie_backend(trie_backend, keys) + for key in keys.into_iter() { + proof_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + Ok(proof_backend.extract_recorder()) } /// Generate child storage read proof. 
pub fn prove_child_read( - mut backend: B, + backend: B, child_info: &ChildInfo, keys: I, -) -> Result> +) -> Result, Box> where B: Backend, H: Hasher, @@ -640,67 +674,102 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let proving_backend = backend.as_proof_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, child_info, keys) + prove_child_read_on_proof_backend(&proving_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. -pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_read_on_proof_backend( + proving_backend: &P, keys: I, -) -> Result> +) -> Result, Box> where - S: trie_backend_essence::TrieBackendStorage, + P: RecProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + proving_backend.extract_proof() } /// Generate storage read proof on pre-created trie backend. -pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_child_read_on_proof_backend( + proving_backend: &P, child_info: &ChildInfo, keys: I, -) -> Result> +) -> Result, Box> where - S: trie_backend_essence::TrieBackendStorage, + P: RecProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + proving_backend.extract_proof() +} + +/// Generate storage child read proof for query plan verification. 
+pub fn prove_child_read_for_query_plan_check( + backend: B, + top_keys: I, + child_keys: I3, +) -> Result<(crate::backend::RecordBackendFor, ProofInput), Box> +where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + I2: IntoIterator, + I2::Item: AsRef<[u8]>, + I3: IntoIterator, +{ + let proof_backend = backend.as_proof_backend() + .ok_or_else( + || Box::new(ExecutionError::UnableToGenerateProof) as Box + )?; + for key in top_keys.into_iter() { + proof_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + for (child_info, keys) in child_keys.into_iter() { + for key in keys.into_iter() { + proof_backend + .child_storage(&child_info, key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + } + + Ok(proof_backend.extract_recorder()) } /// Check storage read proof, generated by `prove_read` call. -pub fn read_proof_check( +pub fn read_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, keys: I, ) -> Result, Option>>, Box> where + P: ProofCheckBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; + let proving_backend = P::create_proof_check_backend(root, proof)?; let mut result = HashMap::new(); for key in keys.into_iter() { let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; @@ -710,19 +779,20 @@ where } /// Check child storage read proof, generated by `prove_child_read` call. 
-pub fn read_child_proof_check( +pub fn read_child_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, child_info: &ChildInfo, keys: I, ) -> Result, Option>>, Box> where + P: ProofCheckBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; + let proving_backend = P::create_proof_check_backend(root, proof)?; let mut result = HashMap::new(); for key in keys.into_iter() { let value = read_child_proof_check_on_proving_backend( @@ -736,11 +806,12 @@ where } /// Check storage read proof on pre-created proving backend. -pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, +pub fn read_proof_check_on_proving_backend( + proving_backend: &B, key: &[u8], ) -> Result>, Box> where + B: Backend, H: Hasher, H::Out: Ord + Codec, { @@ -748,12 +819,13 @@ where } /// Check child storage read proof on pre-created proving backend. -pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, +pub fn read_child_proof_check_on_proving_backend( + proving_backend: &B, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where + B: Backend, H: Hasher, H::Out: Ord + Codec, { @@ -772,6 +844,12 @@ mod tests { map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, }; use sp_runtime::traits::BlakeTwo256; + use sp_trie::{Layout, SimpleProof, SimpleFullProof, BackendProof, FullBackendProof}; + + type TestCheckBackend

= InMemoryProofCheckBackend; + type CompactProof = sp_trie::CompactProof>; + type CompactFullProof = sp_trie::CompactFullProof>; + type QueryPlanProof = sp_trie::QueryPlanProof>; #[derive(Clone)] struct DummyCodeExecutor { @@ -942,6 +1020,12 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { + prove_execution_and_proof_check_works_inner::(); + prove_execution_and_proof_check_works_inner::(); + prove_execution_and_proof_check_works_inner::(); + prove_execution_and_proof_check_works_inner::(); + } + fn prove_execution_and_proof_check_works_inner>() { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -950,7 +1034,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( remote_backend, @@ -963,9 +1047,9 @@ mod tests { ).unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::, BlakeTwo256, u64, _, _>( remote_root, - remote_proof, + remote_proof.into(), &mut Default::default(), &executor, TaskExecutor::new(), @@ -987,8 +1071,8 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); - let backend = state.as_trie_backend().unwrap(); + let state = InMemoryBackend::::from(initial); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); @@ -1004,7 +1088,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1031,8 +1115,8 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let state = new_in_mem::(); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1040,7 +1124,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1078,8 +1162,8 @@ mod tests { b"d4".to_vec(), ]; let key = b"key".to_vec(); - let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let state = new_in_mem::(); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = 
OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1088,7 +1172,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1105,7 +1189,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1124,7 +1208,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1143,8 +1227,8 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let state = new_in_mem::(); + let backend = state.as_proof_backend().unwrap(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut overlay = OverlayedChanges::default(); @@ -1154,7 +1238,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1169,7 +1253,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1194,7 +1278,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1220,7 +1304,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1231,48 +1315,153 @@ mod tests { } } + #[test] + fn prove_read_and_proof_check_works_query_plan() { + use sp_trie::{VerifiableProof, ProofInput}; + + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; + // fetch read proof from 'remote' full node. + // Using compact proof to get record backend and proofs. 
+ let remote_backend = trie_backend::tests::test_trie_proof::(); + let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let remote_root_child = remote_backend.child_storage_root(child_info, std::iter::empty()).0; + let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]) + .unwrap(); + let mut root_map = ChildrenProofMap::default(); + root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); + assert!(ProofInput::ChildTrieRoots(root_map) == root_input); + + let input = ProofInput::query_plan( + remote_root.encode(), + vec![b"value2".to_vec()].into_iter(), + std::iter::empty::<(_, _, std::iter::Empty<_>)>(), + true, + ); + let remote_proof = >::extract_proof( + &recorder, + input, + ).unwrap(); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + std::iter::empty::<(_, _, std::iter::Empty<_>)>(), + true, + ); + + assert!(remote_proof.verify(&input_check).unwrap()); + + // on child trie + let remote_backend = trie_backend::tests::test_trie_proof::(); + + let (recorder, _root_input) = prove_child_read_for_query_plan_check( + remote_backend, + &[b"value2"], + vec![(child_info.clone(), &[b"value3"])], + ).unwrap(); + + let test_with_roots = |include_roots: bool| { + let input = ProofInput::query_plan( + remote_root.encode(), + vec![b"value2".to_vec()].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![b"value3".to_vec()].into_iter(), + )].into_iter(), + include_roots, + ); + let remote_proof = >::extract_proof( + &recorder, + input, + ).unwrap(); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter(), + )].into_iter(), + include_roots, + ); + + 
assert!(remote_proof.clone().verify(&input_check).unwrap()); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter(), + )].into_iter(), + !include_roots, // not including child root in parent breaks extract + ); + + assert!(!remote_proof.verify(&input_check).unwrap()); + }; + + test_with_roots(true); + test_with_roots(false); + } + #[test] fn prove_read_and_proof_check_works() { + prove_read_and_proof_check_works_inner::(); + prove_read_and_proof_check_works_inner::(); + prove_read_and_proof_check_works_inner::(); + prove_read_and_proof_check_works_inner::(); + } + fn prove_read_and_proof_check_works_inner

() + where + P: BackendProof, + P::ProofRaw: Clone, + { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); - // check proof locally - let local_result1 = read_proof_check::( + + // check proof locally + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), &[&[0xff]], ).is_ok(); - // check that results are correct + // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value2".to_vec(), Some(vec![24]))], ); assert_eq!(local_result2, false); + // on child trie - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, child_info, &[b"value3"], ).unwrap(); - let local_result1 = read_child_proof_check::( + let local_result1 = read_child_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), child_info, &[b"value3"], ).unwrap(); - let local_result2 = read_child_proof_check::( + let local_result2 = read_child_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), child_info, &[b"value2"], ).unwrap(); @@ -1286,6 +1475,50 @@ mod tests { ); } + #[test] + fn prove_read_and_proof_on_fullbackend_works() { + // more proof could be tested, but at this point the full backend + // is just here to assert that we are able to test child trie content + // and are able to switch backend for checking proof. + prove_read_and_proof_on_fullbackend_works_inner::(); + prove_read_and_proof_on_fullbackend_works_inner::(); + } + fn prove_read_and_proof_on_fullbackend_works_inner

() + where + P: FullBackendProof, + P::ProofRaw: Clone, + { + // fetch read proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie_proof::

(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); + // check proof locally + let local_result1 = read_proof_check::< + InMemoryFullProofCheckBackend, + BlakeTwo256, + _, + >( + remote_root, + remote_proof.clone().into(), + &[b"value2"], + ).unwrap(); + let local_result2 = read_proof_check::< + InMemoryFullProofCheckBackend, + BlakeTwo256, + _, + >( + remote_root, + remote_proof.clone().into(), + &[&[0xff]], + ).is_ok(); + // check that results are correct + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"value2".to_vec(), Some(vec![24]))], + ); + assert_eq!(local_result2, false); + } + #[test] fn child_storage_uuid() { @@ -1329,8 +1562,8 @@ mod tests { b"aaa".to_vec() => b"0".to_vec(), b"bbb".to_vec() => b"".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); - let backend = state.as_trie_backend().unwrap(); + let state = InMemoryBackend::::from(initial); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); @@ -1348,7 +1581,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 9a2b1c4197310..5e675ed01a648 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -30,7 +30,7 @@ use crate::{ use self::changeset::OverlayedChangeSet; use std::collections::HashMap; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, Codec}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use sp_core::offchain::storage::OffchainOverlayedChanges; use hash_db::Hasher; @@ -417,7 +417,10 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut 
cache: StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { + ) -> Result, String> + where + H::Out: Ord + Codec + 'static + { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -428,7 +431,10 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { + ) -> Result, String> + where + H::Out: Ord + Codec + 'static + { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { self.storage_root(backend, &mut cache); @@ -527,7 +533,7 @@ impl OverlayedChanges { parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Encode + 'static { + ) -> Result, ()> where H::Out: Ord + Codec + 'static { build_changes_trie::<_, H, N>( backend, changes_trie_state, @@ -624,7 +630,7 @@ mod tests { (b"dogglesworth".to_vec(), b"catXXX".to_vec()), (b"doug".to_vec(), b"notadog".to_vec()), ].into_iter().collect(); - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0888c561cae30..3809156134289 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -17,20 +17,22 @@ //! Proving state machine backend. 
-use std::{sync::Arc, collections::HashMap}; use parking_lot::RwLock; use codec::{Decode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, + empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, + ProofInput, RecordBackend, RecordableProof, BackendProof, + record_all_keys, ProofInputKind, FullBackendProof, }; -pub use sp_trie::{Recorder, trie_types::{Layout, TrieError}}; +pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend, DBValue}; -use sp_core::storage::ChildInfo; +use crate::{Error, ExecutionError, DBValue}; +use crate::backend::{Backend, RecProofBackend, ProofRawFor}; +use sp_core::storage::{ChildInfo, ChildInfoProof}; +use std::marker::PhantomData; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -50,6 +52,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + None, ); let map_e = |e| format!("Trie lookup error: {}", e); @@ -77,6 +80,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + Some(child_info), ); let map_e = |e| format!("Trie lookup error: {}", e); @@ -96,6 +100,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + None, ); let mut iter = move || -> Result<(), Box>> { @@ -109,91 +114,174 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> } } -/// Global proof recorder, act as a layer over a hash db for recording queried -/// data. 
-pub type ProofRecorder = Arc::Out, Option>>>; - /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( - TrieBackend, H>, -); +pub struct ProvingBackend< + S: TrieBackendStorage, + H: Hasher, + P: BackendProof, +> { + pub(crate) trie_backend: TrieBackend>, H, P>, + _ph: PhantomData

, +} /// Trie backend storage with its proof recorder. -pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a S, - proof_recorder: ProofRecorder, +pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { + backend: S, + // Inner mutability require sync here due to sync constraint on TrieBackendStorage (itself + // related to HashDB). + proof_recorder: RwLock, + _ph: PhantomData, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> - where H::Out: Codec +impl<'a, S, H, P> ProvingBackend<&'a S, H, P> + where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, + P: BackendProof, { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { + pub fn new(backend: &'a TrieBackend) -> Self { let proof_recorder = Default::default(); Self::new_with_recorder(backend, proof_recorder) } - /// Create new proving backend with the given recorder. - pub fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, + fn new_with_recorder( + backend: &'a TrieBackend, + proof_recorder: RecordBackendFor, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); let recorder = ProofRecorderBackend { backend: essence.backend_storage(), - proof_recorder, + proof_recorder: RwLock::new(proof_recorder), + _ph: PhantomData, }; - ProvingBackend(TrieBackend::new(recorder, root)) + match P::ProofRaw::INPUT_KIND { + ProofInputKind::ChildTrieRoots => { + ProvingBackend { + trie_backend: TrieBackend::new_with_roots(recorder, root), + _ph: PhantomData + } + }, + ProofInputKind::None + | ProofInputKind::QueryPlan + | ProofInputKind::QueryPlanWithValues => { + ProvingBackend { + trie_backend: TrieBackend::new(recorder, root), + _ph: PhantomData, + } + }, + } } +} - /// Extracting the gathered unordered proof. 
- pub fn extract_proof(&self) -> StorageProof { - let trie_nodes = self.0.essence().backend_storage().proof_recorder - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::new(trie_nodes) +impl ProvingBackend + where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, + P: BackendProof, +{ + /// Create new proving backend from a given recorder. + /// This does not manage root registration and can + /// leave new recorder in a inconsistent state. + pub(crate) fn from_backend_with_recorder( + backend: S, + root: H::Out, + proof_recorder: RecordBackendFor, + ) -> Self { + let recorder = ProofRecorderBackend { + backend, + proof_recorder: RwLock::new(proof_recorder), + _ph: PhantomData, + }; + match P::ProofRaw::INPUT_KIND { + ProofInputKind::ChildTrieRoots => { + ProvingBackend { + trie_backend: TrieBackend::new_with_roots(recorder, root), + _ph: PhantomData + } + }, + ProofInputKind::None + | ProofInputKind::QueryPlan + | ProofInputKind::QueryPlanWithValues => { + ProvingBackend { + trie_backend: TrieBackend::new(recorder, root), + _ph: PhantomData, + } + }, + } } + } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage - for ProofRecorderBackend<'a, S, H> +impl, H: Hasher, R: RecordBackend> TrieBackendStorage + for ProofRecorderBackend { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.read().get(key) { + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + if let Some(v) = self.proof_recorder.read().get(child_info, key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.write().insert(key.clone(), backend_value.clone()); + let backend_value = self.backend.get(child_info, key, prefix)?; + self.proof_recorder.write().record(child_info.clone(), key.clone(), backend_value.clone()); Ok(backend_value) } } -impl<'a, 
S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug - for ProvingBackend<'a, S, H> +impl, H: Hasher, P: BackendProof> std::fmt::Debug + for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ProvingBackend") } } -impl<'a, S, H> Backend for ProvingBackend<'a, S, H> +impl RecProofBackend for ProvingBackend where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, + S: TrieBackendStorage, + H: Hasher, H::Out: Ord + Codec, + P: BackendProof, +{ + fn extract_proof(&self) -> Result, Box> { + let input = self.trie_backend.extract_registered_roots(); + >::ProofRaw::extract_proof( + &self.trie_backend.essence().backend_storage().proof_recorder.read(), + input, + ).map_err(|e| Box::new(e) as Box) + } + + fn extract_recorder(self) -> (RecordBackendFor, ProofInput) { + let input = self.trie_backend.extract_registered_roots(); + let recorder = self.trie_backend.into_storage().proof_recorder.into_inner(); + (recorder, input) + } +} + +impl Backend for ProvingBackend + where + S: TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + P: BackendProof, { type Error = String; type Transaction = S::Overlay; - type TrieBackendStorage = S; + type StorageProof = P; + type RecProofBackend = Self; + type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.storage(key) + self.trie_backend.storage(key) } fn child_storage( @@ -201,7 +289,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.child_storage(child_info, key) + self.trie_backend.child_storage(child_info, key) } fn for_keys_in_child_storage( @@ -209,11 +297,11 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(child_info, f) + self.trie_backend.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - 
self.0.next_storage_key(key) + self.trie_backend.next_storage_key(key) } fn next_child_storage_key( @@ -221,15 +309,15 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.next_child_storage_key(child_info, key) + self.trie_backend.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_keys_with_prefix(prefix, f) + self.trie_backend.for_keys_with_prefix(prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_key_values_with_prefix(prefix, f) + self.trie_backend.for_key_values_with_prefix(prefix, f) } fn for_child_keys_with_prefix( @@ -238,15 +326,15 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix( child_info, prefix, f) + self.trie_backend.for_child_keys_with_prefix( child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.0.pairs() + self.trie_backend.pairs() } fn keys(&self, prefix: &[u8]) -> Vec> { - self.0.keys(prefix) + self.trie_backend.keys(prefix) } fn child_keys( @@ -254,14 +342,14 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.0.child_keys(child_info, prefix) + self.trie_backend.child_keys(child_info, prefix) } fn storage_root<'b>( &self, delta: impl Iterator)>, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root(delta) + self.trie_backend.storage_root(delta) } fn child_storage_root<'b>( @@ -269,27 +357,54 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, delta: impl Iterator)>, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - self.0.child_storage_root(child_info, delta) + self.trie_backend.child_storage_root(child_info, delta) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } fn usage_info(&self) -> crate::stats::UsageInfo { - self.0.usage_info() + 
self.trie_backend.usage_info() + } + + fn from_previous_rec_state( + self, + previous_recorder: crate::backend::RecordBackendFor, + previous_input: ProofInput, + ) -> Option { + let root = self.trie_backend.essence().root().clone(); + let storage = self.trie_backend.into_storage(); + let current_recorder = storage.proof_recorder; + let backend = storage.backend; + if current_recorder.write().merge(previous_recorder) { + let current_recorder = current_recorder.into_inner(); + + Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) + } else { + None + }.filter(|backend| { + match previous_input { + ProofInput::ChildTrieRoots(roots) => { + backend.trie_backend.push_registered_roots(roots) + }, + ProofInput::None => true, + _ => false, + } + }) } } /// Create proof check backend. -pub fn create_proof_check_backend( +pub fn create_proof_check_backend( root: H::Out, - proof: StorageProof, -) -> Result, H>, Box> + proof: P, +) -> Result, Box> where H: Hasher, H::Out: Codec, + P: BackendProof, { - let db = proof.into_memory_db(); - + let db = proof.into_partial_db() + .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new(db, root)) } else { @@ -297,48 +412,91 @@ where } } +/// Create proof check backend with different backend for each +/// child trie. 
+pub fn create_full_proof_check_backend( + root: H::Out, + proof: P, +) -> Result, Box> +where + H: Hasher, + H::Out: Codec, + P: FullBackendProof, +{ + use std::ops::Deref; + let db = proof.into_partial_full_db() + .map_err(|e| Box::new(format!("{}", e)) as Box)?; + if db.deref().get(&ChildInfoProof::top_trie()) + .map(|db| db.contains(&root, EMPTY_PREFIX)) + .unwrap_or(false) { + Ok(TrieBackend::new(db, root)) + } else { + Err(Box::new(ExecutionError::InvalidProof)) + } +} + #[cfg(test)] mod tests { - use crate::InMemoryBackend; - use crate::trie_backend::tests::test_trie; + use crate::InMemoryProofCheckBackend; + use crate::trie_backend::tests::test_trie_proof; use super::*; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; + use sp_trie::{SimpleProof, ProofCommon}; use sp_runtime::traits::BlakeTwo256; - fn test_proving<'a>( - trie_backend: &'a TrieBackend,BlakeTwo256>, - ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { + type CompactProof = sp_trie::CompactProof>; + + fn test_proving>( + trie_backend: &TrieBackend, BlakeTwo256, P>, + ) -> ProvingBackend<&PrefixedMemoryDB, BlakeTwo256, P> { ProvingBackend::new(trie_backend) } #[test] fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); + proof_is_empty_until_value_is_read_inner::(); + proof_is_empty_until_value_is_read_inner::(); + } + fn proof_is_empty_until_value_is_read_inner>() { + let trie_backend = test_trie_proof::

(); + assert!(test_proving(&trie_backend).extract_proof().unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); + proof_is_non_empty_after_value_is_read_inner::(); + proof_is_non_empty_after_value_is_read_inner::(); + } + fn proof_is_non_empty_after_value_is_read_inner>() { + let trie_backend = test_trie_proof::

(); let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); + assert!(!backend.extract_proof().unwrap().is_empty()); } #[test] fn proof_is_invalid_when_does_not_contains_root() { use sp_core::H256; - let result = create_proof_check_backend::( + let result = create_proof_check_backend::( + H256::from_low_u64_be(1), + SimpleProof::empty() + ); + assert!(result.is_err()); + let result = create_proof_check_backend::( H256::from_low_u64_be(1), - StorageProof::empty() + CompactProof::empty() ); assert!(result.is_err()); } #[test] fn passes_through_backend_calls() { - let trie_backend = test_trie(); + passes_through_backend_calls_inner::(); + passes_through_backend_calls_inner::(); + } + fn passes_through_backend_calls_inner>() { + let trie_backend = test_trie_proof::

(); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); @@ -351,28 +509,39 @@ mod tests { #[test] fn proof_recorded_and_checked() { + proof_recorded_and_checked_inner::(); + proof_recorded_and_checked_inner::(); + } + fn proof_recorded_and_checked_inner>() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory = InMemoryProofCheckBackend::::default(); + let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - let trie = in_memory.as_trie_backend().unwrap(); + let trie = &in_memory; let trie_root = trie.storage_root(::std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = ProvingBackend::new(trie); + let proving = in_memory.as_proof_backend().unwrap(); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(); + let proof = proving.extract_proof().unwrap(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof.into(), + ).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } #[test] fn proof_recorded_and_checked_with_child() { + proof_recorded_and_checked_with_child_inner::(); + proof_recorded_and_checked_with_child_inner::(); + } + fn proof_recorded_and_checked_with_child_inner>() { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -384,8 
+553,8 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); + let in_memory = InMemoryProofCheckBackend::::default(); + let in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( std::iter::empty(), @@ -404,7 +573,7 @@ mod tests { vec![i] )); - let trie = in_memory.as_trie_backend().unwrap(); + let trie = &in_memory; let trie_root = trie.storage_root(::std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!( @@ -412,14 +581,14 @@ mod tests { vec![i] )); - let proving = ProvingBackend::new(trie); + let proving = in_memory.clone().as_proof_backend().unwrap(); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(); + let proof = proving.extract_proof().unwrap(); - let proof_check = create_proof_check_backend::( + let proof_check = create_proof_check_backend::( in_memory_root.into(), - proof + proof.into(), ).unwrap(); assert!(proof_check.storage(&[0]).is_err()); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); @@ -427,16 +596,17 @@ mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let proving = ProvingBackend::new(trie); + let proving = in_memory.as_proof_backend().unwrap(); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( + let proof = proving.extract_proof().unwrap(); + let proof_check = create_proof_check_backend::( in_memory_root.into(), - proof + proof.into(), ).unwrap(); + assert_eq!( - proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&child_info_1, 
&[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index be7dc6df9de9a..af4eb873d8482 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -21,7 +21,7 @@ use std::any::{Any, TypeId}; use codec::Decode; use hash_db::Hasher; use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, + backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, StorageKey, StorageValue, changes_trie::{ Configuration as ChangesTrieConfiguration, @@ -45,6 +45,8 @@ use sp_core::{ use codec::Encode; use sp_externalities::{Extensions, Extension}; +type InMemoryBackend = crate::InMemoryBackend; + /// Simple HashMap-based Externalities impl. pub struct TestExternalities where diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e0a86bbd193a1..6a05681741532 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,28 +19,88 @@ use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, + ChildrenProofMap, ProofInput, BackendProof, FullBackendProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; -use codec::{Codec, Decode}; +use crate::backend::RecordBackendFor; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; +use codec::{Codec, Decode, Encode}; use crate::{ - StorageKey, StorageValue, Backend, + StorageKey, StorageValue, Backend, backend::ProofCheckBackend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; +use parking_lot::RwLock; +use std::marker::PhantomData; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
-pub struct TrieBackend<S: TrieBackendStorage<H>, H: Hasher> {
+pub struct TrieBackend<S: TrieBackendStorage<H>, H: Hasher, P> {
 	pub (crate) essence: TrieBackendEssence<S, H>,
+	_ph: PhantomData<P>
, } -impl, H: Hasher> TrieBackend where H::Out: Codec { +impl, H: Hasher, P> TrieBackend where H::Out: Codec { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { - essence: TrieBackendEssence::new(storage, root), + essence: TrieBackendEssence::new(storage, root, None), + _ph: PhantomData, } } + /// Create a trie backend that also record visited trie roots. + /// Visited trie roots allow packing proofs and does cache child trie roots. + pub fn new_with_roots(storage: S, root: H::Out) -> Self { + let register_roots = Some(RwLock::new(Default::default())); + TrieBackend { + essence: TrieBackendEssence::new(storage, root, register_roots), + _ph: PhantomData, + } + } + + /// Get registered roots. Empty input is returned when the backend is + /// not configured to register roots. + pub fn extract_registered_roots(&self) -> ProofInput { + if let Some(register_roots) = self.essence.register_roots() { + let mut dest = ChildrenProofMap::default(); + dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); + let roots = { + std::mem::replace(&mut *register_roots.write(), Default::default()) + }; + for (child_info, root) in roots.into_iter() { + if let Some(root) = root { + dest.insert(child_info.proof_info(), root); + } + } + ProofInput::ChildTrieRoots(dest) + } else { + ProofInput::None + } + } + + /// Set previously registered roots. + /// Return false if there is some conflicting information (roots should not change + /// for a given `StateMachine` instante). 
+ pub(crate) fn push_registered_roots(&self, previous: ChildrenProofMap>) -> bool { + if let Some(register_roots) = self.essence.register_roots() { + let mut roots = register_roots.write(); + for (child_info_proof, encoded_root) in previous { + if let Some(child_info) = child_info_proof.as_child_info() { + if let Some(existing_root) = roots.get(&child_info) { + if Some(&encoded_root) != existing_root.as_ref() { + return false; + } + } + roots.insert(child_info, Some(encoded_root)); + } else { + return false; + } + } + true + } else { + false + } + } + /// Get backend essence reference. pub fn essence(&self) -> &TrieBackendEssence { &self.essence @@ -67,18 +127,27 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } -impl, H: Hasher> std::fmt::Debug for TrieBackend { +impl, H: Hasher, P> std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher> Backend for TrieBackend where +impl Backend for TrieBackend where + H: Hasher, + S: TrieBackendStorage, H::Out: Ord + Codec, + P: BackendProof, { type Error = String; type Transaction = S::Overlay; - type TrieBackendStorage = S; + type StorageProof = P; + type RecProofBackend = crate::proving_backend::ProvingBackend< + S, + H, + Self::StorageProof, + >; + type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) @@ -131,7 +200,8 @@ impl, H: Hasher> Backend for TrieBackend where fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(self.essence(), self.essence.root())?; + let backend = self.essence().top_backend(); + let trie = TrieDB::::new(&backend, self.essence.root())?; let mut v = Vec::new(); for x in trie.iter()? 
{ let (key, value) = x?; @@ -152,7 +222,8 @@ impl, H: Hasher> Backend for TrieBackend where fn keys(&self, prefix: &[u8]) -> Vec { let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(self.essence(), self.essence.root())?; + let backend = self.essence().top_backend(); + let trie = TrieDB::::new(&backend, self.essence.root())?; let mut v = Vec::new(); for x in trie.iter()? { let (key, _) = x?; @@ -178,6 +249,7 @@ impl, H: Hasher> Backend for TrieBackend where let mut eph = Ephemeral::new( self.essence.backend_storage(), &mut write_overlay, + None, ); match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { @@ -199,8 +271,7 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); - let prefixed_storage_key = child_info.prefixed_storage_key(); - let mut root = match self.storage(prefixed_storage_key.as_slice()) { + let mut root = match self.essence.child_root_encoded(child_info) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or_else(|| default_root.clone()), Err(e) => { @@ -213,6 +284,7 @@ impl, H: Hasher> Backend for TrieBackend where let mut eph = Ephemeral::new( self.essence.backend_storage(), &mut write_overlay, + Some(child_info), ); match child_delta_trie_root::, _, _, _, _, _, _>( @@ -231,8 +303,27 @@ impl, H: Hasher> Backend for TrieBackend where (root, is_default, write_overlay) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - Some(self) + fn from_previous_rec_state( + self, + recorder: RecordBackendFor, + previous_input: ProofInput, + ) -> Option { + let root = self.essence.root().clone(); + let backend = crate::proving_backend::ProvingBackend::from_backend_with_recorder( + self.essence.into_storage(), + root, + recorder, + ); + match previous_input { + ProofInput::ChildTrieRoots(roots) => { + if !backend.trie_backend.push_registered_roots(roots) { + return None; + } + }, + ProofInput::None => (), + _ => return None, + } + Some(backend) } fn 
register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } @@ -246,12 +337,42 @@ impl, H: Hasher> Backend for TrieBackend where } } +impl ProofCheckBackend for crate::InMemoryProofCheckBackend + where + H::Out: Ord + Codec, + P: BackendProof, +{ + fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result> { + let mem_db = proof.into_partial_db() + .map_err(|e| Box::new(e) as Box)?; + Ok(TrieBackend::new(mem_db, root)) + } +} + +impl ProofCheckBackend for crate::InMemoryFullProofCheckBackend + where + H::Out: Ord + Codec, + P: FullBackendProof, +{ + fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result> { + let mem_db = proof.into_partial_full_db() + .map_err(|e| Box::new(e) as Box)?; + Ok(TrieBackend::new(mem_db, root)) + } +} + #[cfg(test)] pub mod tests { use std::{collections::HashSet, iter}; use sp_core::H256; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut, SimpleProof}; use sp_runtime::traits::BlakeTwo256; use super::*; @@ -285,7 +406,13 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { + pub(crate) fn test_trie_proof>() + -> TrieBackend, BlakeTwo256, P> { + let (mdb, root) = test_db(); + TrieBackend::new(mdb, root) + } + + pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256, SimpleProof> { let (mdb, root) = test_db(); TrieBackend::new(mdb, root) } @@ -316,7 +443,7 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, BlakeTwo256>::new( + assert!(TrieBackend::, BlakeTwo256, SimpleProof>::new( PrefixedMemoryDB::default(), Default::default(), ).pairs().is_empty()); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 72864e312b6ab..d47f006845d0a 100644 --- 
a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -20,20 +20,24 @@ use std::ops::Deref; use std::sync::Arc; +use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, +use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, ChildrenProofMap, empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::ChildInfo; -use codec::Encode; +use sp_core::storage::{ChildInfo, ChildrenMap}; +use codec::{Decode, Encode}; +use parking_lot::RwLock; + +type Result = std::result::Result; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } /// Patricia trie-based pairs storage essence. @@ -41,15 +45,52 @@ pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, empty: H::Out, + /// If defined, we store encoded visited roots for top_trie and child trie in this + /// map. It also act as a cache. + register_roots: Option>>>, +} + +/// Patricia trie-based pairs storage essence, with reference to child info. +pub struct ChildTrieBackendEssence<'a, S: TrieBackendStorage, H: Hasher> { + /// Trie backend to use. + /// For the default child trie it is the top trie one. + pub essence: &'a TrieBackendEssence, + /// Definition of the child trie, this is use to be able to pass + /// child_info information when registering proof. + pub child_info: Option<&'a ChildInfo>, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { + /// Get trie backend for top trie. 
+ pub fn top_backend(&self) -> ChildTrieBackendEssence { + ChildTrieBackendEssence{ + essence: self, + child_info: None, + } + } + + /// Get trie backend for child trie. + pub fn child_backend<'a>( + &'a self, + child_info: &'a ChildInfo, + ) -> ChildTrieBackendEssence<'a, S, H> { + ChildTrieBackendEssence{ + essence: self, + child_info: Some(child_info), + } + } + /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { + pub fn new( + storage: S, + root: H::Out, + register_roots: Option>>>, + ) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), + register_roots, } } @@ -63,6 +104,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &mut self.storage } + /// Get register root reference. + pub fn register_roots(&self) -> Option<&RwLock>>> { + self.register_roots.as_ref() + } + /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -80,13 +126,49 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. 
- pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { + pub fn next_storage_key(&self, key: &[u8]) -> Result> { self.next_storage_key_from_root(&self.root, None, key) } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { - self.storage(child_info.prefixed_storage_key().as_slice()) + pub(crate) fn child_root_encoded( + &self, + child_info: &ChildInfo, + ) -> Result> { + if let Some(cache) = self.register_roots.as_ref() { + if let Some(result) = cache.read().get(child_info) { + return Ok(result.clone()); + } + } + + let root: Option = self.storage(child_info.prefixed_storage_key().as_slice())?; + + if let Some(cache) = self.register_roots.as_ref() { + cache.write().insert(child_info.clone(), root.clone()); + } + + Ok(root) + } + + /// Access the root of the child storage in its parent trie + fn child_root(&self, child_info: &ChildInfo) -> Result> { + if let Some(cache) = self.register_roots.as_ref() { + if let Some(root) = cache.read().get(child_info) { + let root = root.as_ref() + .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + return Ok(root); + } + } + + let encoded_root = self.storage(child_info.prefixed_storage_key().as_slice())?; + if let Some(cache) = self.register_roots.as_ref() { + cache.write().insert(child_info.clone(), encoded_root.clone()); + } + + let root: Option = encoded_root + .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + + Ok(root) } /// Return the next key in the child trie i.e. the minimum key that is strictly superior to @@ -95,20 +177,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { - let child_root = match self.child_root(child_info)? { + ) -> Result> { + let hash = match self.child_root(child_info)? 
{ Some(child_root) => child_root, None => return Ok(None), }; - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - self.next_storage_key_from_root(&hash, Some(child_info), key) } @@ -118,14 +192,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, child_info: Option<&ChildInfo>, key: &[u8], - ) -> Result, String> { + ) -> Result> { let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; + let top_backend; + let child_backend; if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); + child_backend = self.child_backend(&child_info); + keyspace_eph = KeySpacedDB::new(&child_backend, child_info.keyspace()); dyn_eph = &keyspace_eph; } else { - dyn_eph = self; + top_backend = self.top_backend(); + dyn_eph = &top_backend; } let trie = TrieDB::::new(dyn_eph, root) @@ -158,10 +236,10 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { + pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value::, _>(self, &self.root, key).map_err(map_e) + read_trie_value::, _>(&self.top_backend(), &self.root, key).map_err(map_e) } /// Get the value of child storage at given key. @@ -169,14 +247,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { - let root = self.child_root(child_info)? + ) -> Result> { + let root = self.child_root_encoded(child_info)? 
.unwrap_or_else(|| empty_child_trie_root::>().encode()); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(child_info.keyspace(), self, &root, key) - .map_err(map_e) + read_child_trie_value::, _>( + child_info.keyspace(), + &self.child_backend(child_info), + &root, + key, + ).map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. @@ -185,7 +267,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, f: F, ) { - let root = match self.child_root(child_info) { + let root = match self.child_root_encoded(child_info) { Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -195,7 +277,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: if let Err(e) = for_keys_in_child_trie::, _, _>( child_info.keyspace(), - self, + &self.child_backend(child_info), &root, f, ) { @@ -210,7 +292,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: prefix: &[u8], mut f: F, ) { - let root_vec = match self.child_root(child_info) { + let root_vec = match self.child_root_encoded(child_info) { Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -234,7 +316,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, child_info: Option<&ChildInfo>, ) { - let mut iter = move |db| -> Result<(), Box>> { + let mut iter = move |db| -> std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; for x in TrieDBIterator::new_prefixed(&trie, prefix)? 
{ @@ -249,10 +331,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(self, child_info.keyspace()); + let backend = self.child_backend(&child_info); + let db = KeySpacedDB::new(&backend, child_info.keyspace()); iter(&db) } else { - iter(self) + iter(&self.top_backend()) }; if let Err(e) = result { debug!(target: "trie", "Error while iterating by prefix: {}", e); @@ -268,6 +351,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { storage: &'a S, overlay: &'a mut S::Overlay, + child_info: Option<&'a ChildInfo>, + _ph: PhantomData, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB @@ -278,10 +363,16 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { + pub fn new( + storage: &'a S, + overlay: &'a mut S::Overlay, + child_info: Option<&'a ChildInfo>, + ) -> Self { Ephemeral { storage, overlay, + child_info, + _ph: PhantomData, } } } @@ -293,7 +384,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(&key, prefix) { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.storage.get(child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -337,14 +435,22 @@ pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. 
- fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result>; +} + +impl<'a, H: Hasher, S: TrieBackendStorage> TrieBackendStorage for &'a S { + type Overlay = S::Overlay; + + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { + >::get(self, child_info, key, prefix) + } } // This implementation is used by normal storage trie clients. impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(self.deref(), key, prefix) } } @@ -353,7 +459,7 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -361,26 +467,49 @@ impl TrieBackendStorage for PrefixedMemoryDB { impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } -impl, H: Hasher> hash_db::AsHashDB - for TrieBackendEssence +impl TrieBackendStorage for ChildrenProofMap> { + type Overlay = MemoryDB; + + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result> { + let child_info_proof = child_info.proof_info(); + Ok(self.deref().get(&child_info_proof).and_then(|s| + hash_db::HashDB::get(s, key, prefix) + )) + } +} + +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::AsHashDB + for ChildTrieBackendEssence<'a, S, H> { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut 
(dyn hash_db::HashDB + 'b) { self } } -impl, H: Hasher> hash_db::HashDB - for TrieBackendEssence +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::HashDB + for ChildTrieBackendEssence<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if *key == self.empty { + if *key == self.essence.empty { return Some([0u8].to_vec()) } - match self.storage.get(&key, prefix) { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.essence.storage.get(child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -406,8 +535,8 @@ impl, H: Hasher> hash_db::HashDB } } -impl, H: Hasher> hash_db::HashDBRef - for TrieBackendEssence +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::HashDBRef + for ChildTrieBackendEssence<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) @@ -418,7 +547,6 @@ impl, H: Hasher> hash_db::HashDBRef } } - #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; @@ -456,7 +584,7 @@ mod test { .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackendEssence::new(mdb, root_1, None); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -465,7 +593,7 @@ mod test { assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let essence_2 = TrieBackendEssence::new(mdb, root_2, None); assert_eq!( essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index cb7f2daa50e83..46d76fd7d2832 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -18,7 +18,8 @@ serde = { version = "1.0.101", 
optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } ref-cast = "1.0.0" sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 073d80291c13e..770f2efe4b59c 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -19,11 +19,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; use sp_std::{vec::Vec, ops::{Deref, DerefMut}}; +use sp_std::collections::btree_map::BTreeMap; use ref_cast::RefCast; /// Storage key. @@ -90,7 +92,7 @@ pub struct StorageData( /// Map of data to use in a storage, it is a collection of /// byte key and values. #[cfg(feature = "std")] -pub type StorageMap = std::collections::BTreeMap, Vec>; +pub type StorageMap = BTreeMap, Vec>; /// Child trie storage data. #[cfg(feature = "std")] @@ -148,8 +150,10 @@ pub mod well_known_keys { /// Prefix of child storage keys. pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; - /// Prefix of the default child storage keys in the top trie. - pub const DEFAULT_CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:default:"; + /// Prefix of child storage keys of default type. + /// Most of the time using `ChildInfo::from_prefixed_key` is preferable to using + /// this constant. + pub const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; /// Whether a key is a child storage key. /// @@ -161,9 +165,23 @@ pub mod well_known_keys { } } +/// Child information needed for proof construction. 
+/// +/// It contains only `ChildInfo` content that is strictly needed for proofs. +/// +/// It could also be use for specific proof usage. +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +pub enum ChildInfoProof { + /// By default a child only need to be defined by its location in + /// the block top trie. + /// This variant is reserved for child trie of `ParentKeyId` type + /// and do not require to store the full parent key. + /// Empty location is reserved for the top level trie of the proof. + Default(ChildTrieParentKeyId), +} + /// Information related to a child state. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub enum ChildInfo { /// This is the one used by default. ParentKeyId(ChildTrieParentKeyId), @@ -193,6 +211,19 @@ impl ChildInfo { } } + /// ChildInfo definition for top trie. + /// The top trie is defined as a default trie with an empty key. + pub fn top_trie() -> Self { + Self::new_default(&[]) + } + + /// Test if the child info is the block top trie. + pub fn is_top_trie(&self) -> bool { + match self { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => data.len() == 0, + } + } + /// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. @@ -242,6 +273,42 @@ impl ChildInfo { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } + + /// Get default corresponding info to use with proof. + pub fn proof_info(&self) -> ChildInfoProof { + match self { + ChildInfo::ParentKeyId(parent) => ChildInfoProof::Default(parent.clone()), + } + } +} + +impl ChildInfoProof { + /// ChildInfoProof definition for top trie. + /// Same as `ChildInfo::top_trie().proof_info()`. 
+ pub fn top_trie() -> Self { + ChildInfoProof::Default(ChildTrieParentKeyId { data: Vec::new() }) + } + + /// Test if the child info proof is the block top trie. + pub fn is_top_trie(&self) -> bool { + match self { + ChildInfoProof::Default(ChildTrieParentKeyId { data }) => data.len() == 0, + } + } + + /// Returns the type for this child info. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfoProof::Default(..) => ChildType::ParentKeyId, + } + } + + /// Get child info if it can be resolve without additional context. + pub fn as_child_info(self) -> Option { + match self { + ChildInfoProof::Default(parent_key) => Some(ChildInfo::ParentKeyId(parent_key)), + } + } } /// Type of child. @@ -303,7 +370,7 @@ impl ChildType { /// is one. pub fn parent_prefix(&self) -> &'static [u8] { match self { - &ChildType::ParentKeyId => well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX, + &ChildType::ParentKeyId => well_known_keys::DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } } @@ -317,8 +384,7 @@ impl ChildType { /// that will be use only once. /// Those unique id also required to be long enough to avoid any /// unique id to be prefixed by an other unique id. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub struct ChildTrieParentKeyId { /// Data is the storage key without prefix. data: Vec, @@ -334,6 +400,9 @@ impl ChildTrieParentKeyId { } } +/// Map of child trie information stored by `ChildInfo`. 
+pub type ChildrenMap = BTreeMap; + #[cfg(test)] mod tests { use super::*; @@ -343,6 +412,6 @@ mod tests { let child_info = ChildInfo::new_default(b"any key"); let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); - assert!(prefix.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)); + assert!(prefix.starts_with(well_known_keys::DEFAULT_CHILD_TYPE_PARENT_PREFIX)); } } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 8dd386e095109..7a853c920f782 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -24,6 +24,8 @@ trie-db = { version = "0.22.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.24.0", default-features = false } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } +hashbrown = { version = "0.8.0", default-features = false } [dev-dependencies] trie-bench = "0.24.0" @@ -42,5 +44,6 @@ std = [ "trie-db/std", "trie-root/std", "sp-core/std", + "sp-storage/std", ] memory-tracker = [] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 73a4a8029b2d7..fd328aa9f21fe 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -34,10 +34,18 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::StorageProof; +pub use storage_proof::{Common as ProofCommon, ChildrenProofMap, simple::ProofNodes, + compact::FullForMerge, compact::Flat as CompactProof, simple::Full as SimpleFullProof, + compact::Full as CompactFullProof, query_plan::KnownQueryPlanAndValues as QueryPlanProof, + Verifiable as VerifiableProof, Input as ProofInput, InputKind as ProofInputKind, + Recordable as RecordableProof, FullBackendProof, BackendProof, + Mergeable as MergeableProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, + multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, + simple::Flat as SimpleProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, + Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, + nibble_ops, TrieDBIterator, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; @@ -45,6 +53,11 @@ pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +/// Access record backend for a given backend storage proof. +pub type RecordBackendFor = < +

>::ProofRaw as RecordableProof +>::RecordBackend; + #[derive(Default)] /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); @@ -319,6 +332,35 @@ pub fn record_all_keys( Ok(()) } +/// Pack proof from a collection of encoded node. +fn pack_proof_from_collected( + root: &TrieHash, + input: &dyn hash_db::HashDBRef, +) -> Result>, Box>> { + let trie = TrieDB::::new(input, root)?; + trie_db::encode_compact(&trie) +} + +/// Unpack packed proof. Packed proof here is a list of encoded +/// packed node ordered as defined by the compact trie scheme use. +/// Returns a root and a collection on unpacked encoded nodes. +fn unpack_proof(input: &[Vec]) + -> Result<(TrieHash, Vec>), Box>> { + let mut memory_db = MemoryDB::<::Hash>::default(); + let root = trie_db::decode_compact::(&mut memory_db, input)?; + Ok((root.0, memory_db.drain().into_iter().map(|(_k, (v, _rc))| v).collect())) +} + +/// Unpack packed proof. +/// This is faster than `unpack_proof`, and should be prefered is encoded node +/// will be use in a new memory db. +fn unpack_proof_to_memdb(input: &[Vec]) + -> Result<(TrieHash, MemoryDB::<::Hash>), Box>> { + let mut memory_db = MemoryDB::<::Hash>::default(); + let root = trie_db::decode_compact::(&mut memory_db, input)?; + Ok((root.0, memory_db)) +} + /// Read a value from the child trie. 
pub fn read_child_trie_value( keyspace: &[u8], @@ -855,7 +897,7 @@ mod tests { #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { - let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); + let proof = SimpleProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); let storage_root = sp_core::H256::decode( &mut &include_bytes!("../test-res/storage_root")[..], ).unwrap(); @@ -868,7 +910,7 @@ mod tests { &mut &include_bytes!("../test-res/valid-delta-order")[..], ).unwrap(); - let proof_db = proof.into_memory_db::(); + let proof_db = proof.into_partial_db().unwrap(); let first_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs deleted file mode 100644 index 254adc2fcb48a..0000000000000 --- a/primitives/trie/src/storage_proof.rs +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use sp_std::vec::Vec; -use codec::{Encode, Decode}; -use hash_db::{Hasher, HashDB}; - -/// A proof that some set of key-value pairs are included in the storage trie. The proof contains -/// the storage values so that the partial storage backend can be reconstructed by a verifier that -/// does not already have access to the key-value pairs. 
-/// -/// The proof consists of the set of serialized nodes in the storage trie accessed when looking up -/// the keys covered by the proof. Verifying the proof requires constructing the partial trie from -/// the serialized nodes and performing the key lookups. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct StorageProof { - trie_nodes: Vec>, -} - -impl StorageProof { - /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. - pub fn new(trie_nodes: Vec>) -> Self { - StorageProof { trie_nodes } - } - - /// Returns a new empty proof. - /// - /// An empty proof is capable of only proving trivial statements (ie. that an empty set of - /// key-value pairs exist in storage). - pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), - } - } - - /// Returns whether this is an empty proof. - pub fn is_empty(&self) -> bool { - self.trie_nodes.is_empty() - } - - /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed - /// to be traversed in any particular order. - pub fn iter_nodes(self) -> StorageProofNodeIterator { - StorageProofNodeIterator::new(self) - } - - /// Creates a `MemoryDB` from `Self`. - pub fn into_memory_db(self) -> crate::MemoryDB { - self.into() - } - - /// Merges multiple storage proofs covering potentially different sets of keys into one proof - /// covering all keys. The merged proof output may be smaller than the aggregate size of the input - /// proofs due to deduplication of trie nodes. - pub fn merge(proofs: I) -> Self where I: IntoIterator { - let trie_nodes = proofs.into_iter() - .flat_map(|proof| proof.iter_nodes()) - .collect::>() - .into_iter() - .collect(); - - Self { trie_nodes } - } -} - -/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to -/// be traversed in any particular order. 
-pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, -} - -impl StorageProofNodeIterator { - fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), - } - } -} - -impl Iterator for StorageProofNodeIterator { - type Item = Vec; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl From for crate::MemoryDB { - fn from(proof: StorageProof) -> Self { - let mut db = crate::MemoryDB::default(); - for item in proof.iter_nodes() { - db.insert(crate::EMPTY_PREFIX, &item); - } - db - } -} diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs new file mode 100644 index 0000000000000..3261130c72bcd --- /dev/null +++ b/primitives/trie/src/storage_proof/compact.rs @@ -0,0 +1,435 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are a compacted collection of encoded nodes. + +use super::*; +use super::simple::ProofNodes; +use codec::{Codec, Encode, Decode}; +use crate::TrieLayout; +use crate::TrieHash; +use sp_storage::ChildType; +use sp_std::marker::PhantomData; +use sp_std::convert::TryInto; +use sp_std::{vec, vec::Vec}; + +/// A collection on encoded and compacted trie nodes. +/// Nodes are sorted by trie node iteration order, and some hash +/// and/or values are ommitted (they can be either calculated from +/// proof content or completed by proof input). +pub type ProofCompacted = Vec>; + +/// Compacted flat proof. +/// +/// This works as `Flat`, but skips encoding of hashes +/// that can be calculated when reading the child nodes +/// in the proof (nodes ordering hold the trie structure information). 
+/// This requires that the proof is collected with +/// child trie separation and each child trie roots as additional +/// input. +/// We remove child trie info when encoding because it is not strictly needed +/// when decoding. +#[derive(Encode, Decode)] +pub struct Flat(Vec, PhantomData); + +impl PartialEq> for Flat { + fn eq(&self, other: &Flat) -> bool { + self.0.eq(&other.0) + } +} +impl Eq for Flat { } + +impl sp_std::fmt::Debug for Flat { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Flat compact proof: {:?}", &self.0) + } +} + +impl Clone for Flat { + fn clone(&self) -> Self { + Flat(self.0.clone(), PhantomData) + } +} + +/// Compacted proof with child trie . +/// +/// This currently mainly provided for test purpose and extensibility. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +pub struct Full(ChildrenProofMap, PhantomData); + +impl sp_std::fmt::Debug for Full { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Full compact proof: {:?}", &self.0) + } +} + +/// Proof cotaining an intermediate representation of state +/// which is mergeable and can be converted to compact representation. +/// +/// This is needed mainly for technical reasons (merge then compact proofs). +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); + +impl Common for Flat { + fn empty() -> Self { + Flat(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Common for Full { + fn empty() -> Self { + Full(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Common for FullForMerge { + fn empty() -> Self { + FullForMerge(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// Note that this implementation assumes all proof are from a same state. 
+impl Mergeable for FullForMerge { + fn merge(proofs: I) -> Self where I: IntoIterator { + let mut child_sets = ChildrenProofMap::<(ProofMapTrieNodes, Vec)>::default(); + for children in proofs { + for (child_info, (mut proof, root)) in children.0.into_iter() { + child_sets.entry(child_info) + .and_modify(|entry| { + debug_assert!(&root == &entry.1); + let iter_proof = sp_std::mem::replace(&mut proof, Default::default()); + entry.0.extend(iter_proof.0.into_iter()); + }) + .or_insert((proof, root)); + } + } + FullForMerge(child_sets) + } +} + +impl Recordable for Flat + where + T: TrieLayout, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = Vec::default(); + for (child_info, set) in recorder.0.iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, set)?; + result.push(trie_nodes); + } + Ok(Flat(result, PhantomData)) + } else { + Err(missing_pack_input()) + } + } +} + +impl Recordable for Full + where + T: TrieLayout, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, set)?; + result.insert(child_info.proof_info(), trie_nodes); + } + Ok(Full(result, PhantomData)) + } else { + Err(missing_pack_input()) + 
} + } +} + +impl Recordable for FullForMerge + where + H: Hasher, + H::Out: Encode, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.iter() { + let root = roots.get(&child_info.proof_info()) + .ok_or_else(|| missing_pack_input())?.clone(); + let trie_nodes: BTreeMap<_, _> = set + .iter() + .filter_map(|(k, v)| v.as_ref().map(|v| (k.encode(), v.to_vec()))) + .collect(); + result.insert(child_info.proof_info(), (ProofMapTrieNodes(trie_nodes), root)); + } + Ok(FullForMerge(result)) + } else { + Err(missing_pack_input()) + } + } +} + +impl BackendProof for Flat + where + T: TrieLayout, + TrieHash: Codec, +{ + type ProofRaw = FullForMerge; + + fn into_partial_db(self) -> Result> { + let mut db = MemoryDB::default(); + let mut db_empty = true; + for proof in self.0.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + Ok(db) + } +} + +impl BackendProof for Full + where + T: TrieLayout, + TrieHash: Codec, +{ + type ProofRaw = FullForMerge; + + fn into_partial_db(self) -> Result> { + let mut db = MemoryDB::default(); + let mut db_empty = true; + for (_child_info, proof) in self.0.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + Ok(db) + } +} + +impl FullBackendProof for Full + where + T: TrieLayout, + TrieHash: Codec, +{ + fn into_partial_full_db(self) -> Result>> { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0 { + // Note that this does check all hashes by using a trie backend + let (_root, 
db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + result.insert(child_info, db); + } + Ok(result) + } +} + +// Note that this implementation is only possible +// as long as we only have default child trie which +// can be flattened into top trie storage. +impl Into> for Full { + fn into(self) -> Flat { + let mut unique_set = BTreeSet::>::default(); + for (child_info, nodes) in self.0 { + assert!(matches!(child_info, ChildInfoProof::Default(..))); + unique_set.extend(nodes); + } + Flat(vec![unique_set.into_iter().collect()], PhantomData) + } +} + +impl TryInto> for Flat { + type Error = super::Error; + + fn try_into(mut self) -> Result> { + if self.0.len() > 1 { + return Err(super::error( + "Can only convert compact flat proof if it is only top storage" + )); + } + let mut result = ChildrenProofMap::default(); + if let Some(v) = self.0.pop() { + result.insert(ChildInfoProof::top_trie(), v); + } + Ok(Full(result, PhantomData)) + } +} + +impl FullForMerge { + fn to_full(self) -> Result> + where + L: TrieLayout, + TrieHash: Codec, + { + let mut result = ChildrenProofMap::::default(); + for (child_info, (set, root)) in self.0.into_iter() { + let root = Decode::decode(&mut &root[..]) + .map_err(|_e| pack_error())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, &set) + .map_err(|_e| pack_error())?; + result.insert(child_info, trie_nodes); + } + Ok(Full(result, PhantomData)) + } + + fn to_flat(self) -> Result> + where + L: TrieLayout, + TrieHash: Codec, + { + let mut result = Vec::::default(); + for (_child_info, (set, root)) in self.0.into_iter() { + let root = Decode::decode(&mut &root[..]) + .map_err(|_e| pack_error())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, &set) + .map_err(|_e| pack_error())?; + result.push(trie_nodes); + } + Ok(Flat(result, PhantomData)) + } +} + +impl Into> for FullForMerge + where + L: TrieLayout, + TrieHash: Codec, +{ + fn into(self) -> Full { + self.to_full() + .expect("Full for merge was recorded on 
a valid state; qed") + } +} + +impl Into> for FullForMerge + where + L: TrieLayout, + TrieHash: Codec, +{ + fn into(self) -> Flat { + self.to_flat() + .expect("Full for merge was recorded on a valid state; qed") + } +} + +impl Into for FullForMerge +{ + fn into(self) -> super::simple::Flat { + let mut result = ProofNodes::default(); + for (_child_info, (nodes, _root)) in self.0 { + result.extend(nodes.0.into_iter().map(|(_k, v)| v)); + } + super::simple::Flat(result) + } +} + +impl TryInto for Flat { + type Error = super::Error; + + fn try_into(self) -> Result { + let mut result = ProofNodes::default(); + for proof in self.0 { + let (_root, unpacked_proof) = crate::unpack_proof::(proof.as_slice())?; + result.extend(unpacked_proof); + } + Ok(super::simple::Flat(result)) + } +} + +impl TryInto for Full { + type Error = super::Error; + + fn try_into(self) -> Result { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0 { + match child_info.child_type() { + ChildType::ParentKeyId => { + // Note that we could return roots from unpacking. + let (_root, unpacked_proof) = crate::unpack_proof::(proof.as_slice())?; + result.insert(child_info, unpacked_proof); + } + } + } + Ok(super::simple::Full(result)) + } +} + +/// Container recording trie nodes and their encoded hash. 
+#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +struct ProofMapTrieNodes(pub BTreeMap, DBValue>); + +impl sp_std::default::Default for ProofMapTrieNodes { + fn default() -> Self { + ProofMapTrieNodes(Default::default()) + } +} + +impl sp_std::ops::Deref for ProofMapTrieNodes { + type Target = BTreeMap, DBValue>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ProofMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for ProofMapTrieNodes + where + H::Out: Encode, +{ + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + let key = key.encode(); + self.0.get(&key).cloned() + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + let key = key.encode(); + self.0.contains_key(&key) + } +} diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs new file mode 100644 index 0000000000000..348026f04264a --- /dev/null +++ b/primitives/trie/src/storage_proof/mod.rs @@ -0,0 +1,479 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use sp_std::collections::{btree_map::BTreeMap, btree_map::Entry}; +use sp_std::collections::btree_set::BTreeSet; +use sp_std::vec::Vec; +use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; +use hash_db::{Hasher, HashDBRef}; +use crate::Layout; +use sp_storage::{ChildInfo, ChildInfoProof, ChildrenMap}; +use trie_db::DBValue; +use crate::MemoryDB; + +pub mod simple; +pub mod compact; +pub mod query_plan; +pub mod multiple; + +// We are not including it to sp_std, this hash map +// usage is restricted here to proof. +// In practice it is already use internally by no_std trie_db. 
+#[cfg(not(feature = "std"))] +use hashbrown::{hash_map::Entry as HEntry, HashMap}; + +#[cfg(feature = "std")] +use std::collections::{hash_map::Entry as HEntry, HashMap}; + +type Result = sp_std::result::Result; +type CodecResult = sp_std::result::Result; + +#[cfg(feature = "std")] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produced by storage proof logic. + /// It is formatted in std to simplify type. + Proof(&'static str), + /// Error produced by trie manipulation. + Trie(String), +} + +#[cfg(feature = "std")] +impl std::error::Error for Error { } + +#[cfg(not(feature = "std"))] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produced by storage proof logic. + Proof, + /// Error produced by trie manipulation. + Trie, +} + +#[cfg(feature = "std")] +impl sp_std::fmt::Display for Error { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + Error::Trie(msg) => write!(f, "Proof error trie: {}", msg), + Error::Proof(msg) => write!(f, "Proof error: {}", msg), + } + } +} + +#[cfg(feature = "std")] +impl sp_std::convert::From> for Error { + fn from(e: sp_std::boxed::Box) -> Self { + // Only trie error is build from box so we do a tiny simplification here + // by generalizing. 
+ Error::Trie(format!("{}", e)) + } +} + +#[cfg(not(feature = "std"))] +impl sp_std::convert::From> for Error { + fn from(_e: sp_std::boxed::Box) -> Self { + Error::Trie + } +} + +impl sp_std::convert::From for Error { + fn from(e: CodecError) -> Self { + error(e.what()) + } +} + +#[cfg(feature = "std")] +const fn error(message: &'static str) -> Error { + Error::Proof(message) +} + +#[cfg(not(feature = "std"))] +const fn error(_message: &'static str) -> Error { + Error::Proof +} + +const fn missing_pack_input() -> Error { + error("Packing input missing for proof") +} + +const fn pack_error() -> Error { + error("Error while packing for proof") +} + +const fn missing_verify_input() -> Error { + error("Input missing for proof verification") +} + +const fn incompatible_type() -> Error { + error("Incompatible type") +} + + +#[derive(Clone, Eq, PartialEq)] +/// Additional information needed to manage a storage proof. +/// These do not need to be part of the proof but are required +/// when processing the proof. +pub enum Input { + /// Proof is self contained. + None, + + /// Contains trie roots used during proof processing. + ChildTrieRoots(ChildrenProofMap>), + + /// For each children, contains encoded trie roots used during proof processing. + /// Also contains key and values queried during the proof processing. + QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), + + /// Contains trie roots used during proof processing. + /// Contains keys queried during the proof processing. + QueryPlan(ChildrenProofMap<(Vec, Vec>)>), +} + +impl Default for Input { + fn default() -> Self { + Input::None + } +} +impl Input { + /// Get input kind for a given input. + pub fn kind(&self) -> InputKind { + match self { + Input::ChildTrieRoots(..) => InputKind::ChildTrieRoots, + Input::QueryPlan(..) => InputKind::QueryPlan, + Input::QueryPlanWithValues(..) => InputKind::QueryPlanWithValues, + Input::None => InputKind::None, + } + } + + /// Build a query plan with values. 
+ /// All tuples are key and optional value. + /// Children iterator also contains children encoded root. + /// If `include_child_root` is set to true, we add the child trie query to the top + /// trie, that is usually what we want (unless we only want to prove something + /// local to a child trie. + pub fn query_plan_with_values( + top_encoded_root: Vec, + top: impl Iterator, Option>)>, + children: impl Iterator, + impl Iterator, Option>)>, + )>, + include_child_root: bool, + ) -> Input { + let mut result = ChildrenProofMap::default(); + let mut additional_roots = Vec::new(); + for (child_info, encoded_root, key_values) in children { + if include_child_root { + additional_roots.push(( + child_info.prefixed_storage_key().into_inner(), + Some(encoded_root.clone()), + )); + } + result.insert(child_info.proof_info(), (encoded_root, key_values.collect())); + } + let mut top_values: Vec<_> = top.collect(); + top_values.extend(additional_roots); + result.insert(ChildInfo::top_trie().proof_info(), (top_encoded_root, top_values)); + + Input::QueryPlanWithValues(result) + } + + /// Build a query plan. + /// Iterator contains key. + /// Children iterator also contains children encoded root. + /// If `include_child_root` is set to true, we add the child trie query to the top + /// trie, that is usually what we want (unless we only want to prove something + /// local to a child trie. 
+ pub fn query_plan( + top_encoded_root: Vec, + top: impl Iterator>, + children: impl Iterator, impl Iterator>)>, + include_child_root: bool, + ) -> Input { + let mut result = ChildrenProofMap::default(); + let mut additional_roots = Vec::new(); + for (child_info, encoded_root, keys) in children { + if include_child_root { + additional_roots.push(child_info.prefixed_storage_key().into_inner()); + } + result.insert(child_info.proof_info(), (encoded_root, keys.collect())); + } + let mut top_keys: Vec<_> = top.collect(); + top_keys.extend(additional_roots); + result.insert(ChildInfo::top_trie().proof_info(), (top_encoded_root, top_keys)); + + Input::QueryPlan(result) + } +} + +/// Kind for a `Input` variant. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum InputKind { + /// `Input::None` kind. + None, + /// `Input::ChildTrieRoots` kind. + ChildTrieRoots, + /// `Input::QueryPlan` kind. + QueryPlan, + /// `Input::QueryPlanWithValues` kind. + QueryPlanWithValues, +} + +/// Basic trait for proofs. +pub trait Common: sp_std::fmt::Debug + Sized { + /// Returns a new empty proof. + /// + /// An empty proof is capable of only proving trivial statements (ie. that an empty set of + /// key-value pairs exist in storage). + fn empty() -> Self; + + /// Returns whether this is an empty proof. + fn is_empty(&self) -> bool; +} + +/// Trait for proofs that can be merged. +/// +/// Merging can be a non negligeable additional cost. +/// So when possible, user should rather share recording context +/// than merge multiple recorded proofs. +pub trait Mergeable: Common { + /// Merges multiple storage proofs covering potentially different sets of keys into one proof + /// covering all keys. The merged proof output may be smaller than the aggregate size of the input + /// proofs due to deduplication of trie nodes. + fn merge(proofs: I) -> Self where I: IntoIterator; +} + +/// Trait for proofs that can be recorded against a `RecordBackend`. 
+pub trait Recordable: Common { + /// Variant of enum input to use. + const INPUT_KIND: InputKind; + + /// The data structure for recording proof entries. + type RecordBackend: RecordBackend; + + /// Extracts the gathered proof. + /// The input provided must match the kind specified by `Recordable::INPUT_KIND`. + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result; +} + +/// Proof that could be use as a backend to execute action +/// on a backend. +pub trait BackendProof: Codec + Common { + /// Intermediate proof format that is recorded + /// and mergeable. + type ProofRaw: Recordable + + Mergeable + + Into; + + /// Extract a trie db from the proof. + /// This mainly allows running proof against + /// a trie backend (memorydb containing unordered + /// gathered encoded node in this case). + /// Can fail on invalid proof content. + fn into_partial_db(self) -> Result>; +} + +/// Proof that could be use as a backend to execute action +/// on a backend, with a different backend per child proofs. +pub trait FullBackendProof: BackendProof { + /// Extract a trie dbs with children info from the proof. + /// Can fail on invalid proof content. + fn into_partial_full_db(self) -> Result>>; +} + +/// Trait for proofs that simply provides validity information. +pub trait Verifiable: Codec + Common { + /// Run proof validation, return verification result. + /// Error is returned for invalid input, or bad proof format. + fn verify(self, input: &Input) -> Result; +} + +/// Trie encoded node recorder trait. +/// +/// This trait does not strictly need H as generic parameter and could use H::Out, +/// but currently use Hasher makes code more readable. +pub trait RecordBackend: Send + Sync + Clone + Default { + /// Access recorded value, allow using the backend as a cache. + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option>; + /// Record the actual value. 
+ fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option); + /// Merge two records, returns false on failure. + fn merge(&mut self, other: Self) -> bool; +} + +/// Trie node recorder with child trie isolation, keeping child trie origin +/// is needed for proof compaction. +pub struct FullRecorder(ChildrenMap>); + +/// Trie node recorder with a single storage for all recoded nodes (as in +/// state db column). +/// This variant exists only for performance, but is not strictly necessary. +/// (`FullRecorder` cost an additional map access) +pub struct FlatRecorder(RecordMapTrieNodes); + +impl Default for FlatRecorder { + fn default() -> Self { + FlatRecorder(Default::default()) + } +} + +impl Default for FullRecorder { + fn default() -> Self { + FullRecorder(Default::default()) + } +} + +impl Clone for FlatRecorder { + fn clone(&self) -> Self { + FlatRecorder(self.0.clone()) + } +} + +impl Clone for FullRecorder { + fn clone(&self) -> Self { + FullRecorder(self.0.clone()) + } +} + +impl RecordBackend for FullRecorder { + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option> { + self.0.get(child_info).and_then(|s| (**s).get(&key).cloned()) + } + + fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option) { + self.0.entry(child_info) + .or_default() + .insert(key, value); + } + + fn merge(&mut self, mut other: Self) -> bool { + for (child_info, other) in sp_std::mem::replace(&mut other.0, Default::default()) { + match self.0.entry(child_info) { + Entry::Occupied(mut entry) => { + for (key, value) in other.0 { + match entry.get_mut().entry(key) { + HEntry::Occupied(entry) => { + if entry.get() != &value { + return false; + } + }, + HEntry::Vacant(entry) => { + entry.insert(value); + }, + } + } + }, + Entry::Vacant(entry) => { + entry.insert(other); + }, + } + } + true + } +} + +impl RecordBackend for FlatRecorder { + fn get(&self, _child_info: &ChildInfo, key: &H::Out) -> Option> { + (*self.0).get(&key).cloned() + } + + fn 
record(&mut self, _child_info: ChildInfo, key: H::Out, value: Option) { + (*self.0).insert(key.clone(), value.clone()); + } + + fn merge(&mut self, mut other: Self) -> bool { + for (key, value) in sp_std::mem::replace(&mut other.0, Default::default()).0 { + match self.0.entry(key) { + HEntry::Occupied(entry) => { + if entry.get() != &value { + return false; + } + }, + HEntry::Vacant(entry) => { + entry.insert(value); + }, + } + } + true + } +} + +/// Type for storing a map of child trie proof related information. +/// A few utilities methods are defined. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +pub struct ChildrenProofMap(pub BTreeMap); + +impl sp_std::ops::Deref for ChildrenProofMap { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ChildrenProofMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl sp_std::default::Default for ChildrenProofMap { + fn default() -> Self { + ChildrenProofMap(BTreeMap::new()) + } +} + +impl IntoIterator for ChildrenProofMap { + type Item = (ChildInfoProof, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +/// Container recording trie nodes. Only here to factor `HashDBRef` methods +/// between `FullRecorder` and `FlatRecorder`. 
+struct RecordMapTrieNodes(HashMap>); + +impl sp_std::default::Default for RecordMapTrieNodes { + fn default() -> Self { + RecordMapTrieNodes(Default::default()) + } +} + +impl Clone for RecordMapTrieNodes { + fn clone(&self) -> Self { + RecordMapTrieNodes(self.0.clone()) + } +} + +impl sp_std::ops::Deref for RecordMapTrieNodes { + type Target = HashMap>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for RecordMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for RecordMapTrieNodes { + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + self.0.get(key).and_then(Clone::clone) + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + self.0.get(key).map(Option::is_some).unwrap_or(false) + } +} diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs new file mode 100644 index 0000000000000..7795e844318f5 --- /dev/null +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -0,0 +1,289 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Enumeration to use different storage proofs from a single type. + +use super::*; +use sp_std::convert::TryInto; +use sp_std::marker::PhantomData; + +/// Different kind of proof representation are allowed. +/// This definition is used as input parameter when producing +/// a storage proof. +/// Some kind are reserved for test or internal use and will +/// not be usable when decoding proof, those could be remove +/// when substrate will be able to define custom state-machine +/// backend. +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum StorageProofKind { + /// Kind for `MultipleStorageProof::Flat`. 
+ Flat = 1, + + /// Kind for `MultipleStorageProof::Compact`. + Compact = 2, +} + +impl StorageProofKind { + /// Decode a byte value representing the storage kind. + /// Return `None` if the kind does not exists or is not allowed. + pub fn from_byte(encoded: u8) -> Option { + Some(match encoded { + x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, + x if x == StorageProofKind::Compact as u8 => StorageProofKind::Compact, + _ => return None, + }) + } +} + +/// Allow usage of multiple proof at the same time. This is usefull when +/// we want to be able to operate from different proof origin. +/// It produces a single proof type that is defined by type parameter `D` +/// as `DefaultKind`. +#[derive(PartialEq, Eq, Clone)] +pub enum MultipleStorageProof { + /// See `crate::storage_proof::simple::Flat`. + Flat(super::simple::Flat), + + /// See `crate::storage_proof::compact::Flat`. + Compact(super::compact::Flat>, PhantomData), +} + +impl sp_std::fmt::Debug for MultipleStorageProof { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + MultipleStorageProof::Flat(v) => v.fmt(f), + MultipleStorageProof::Compact(v, _) => v.fmt(f), + } + } +} + +/// Allow to use specific kind of proof by default. +pub trait DefaultKind: Clone + Send + Sync { + const KIND: StorageProofKind; +} + +/// Default the multiple proof to flat. +#[derive(Clone, Copy)] +pub struct FlatDefault; + +impl DefaultKind for FlatDefault { + const KIND: StorageProofKind = StorageProofKind::Flat; +} + +impl Decode for MultipleStorageProof { + fn decode(value: &mut I) -> CodecResult { + let kind = value.read_byte()?; + Ok(match StorageProofKind::from_byte(kind) + .ok_or_else(|| codec::Error::from("Invalid storage kind"))? 
{ + StorageProofKind::Flat => MultipleStorageProof::Flat(Decode::decode(value)?), + StorageProofKind::Compact => MultipleStorageProof::Compact( + Decode::decode(value)?, + PhantomData, + ), + }) + } +} + +impl Encode for MultipleStorageProof { + fn encode_to(&self, dest: &mut T) { + (self.kind() as u8).encode_to(dest); + match self { + MultipleStorageProof::Flat(p) => p.encode_to(dest), + MultipleStorageProof::Compact(p, _) => p.encode_to(dest), + } + } +} + +impl Common for MultipleStorageProof { + fn empty() -> Self { + match D::KIND { + StorageProofKind::Flat => + MultipleStorageProof::Flat(super::simple::Flat::empty()), + StorageProofKind::Compact => + MultipleStorageProof::Compact(super::compact::Flat::empty(), PhantomData), + } + } + + fn is_empty(&self) -> bool { + match self { + MultipleStorageProof::Flat(data) => data.is_empty(), + MultipleStorageProof::Compact(data, _) => data.is_empty(), + } + } +} + +pub enum MultipleRecorder { + Flat(super::FlatRecorder, StorageProofKind, PhantomData), + Full(super::FullRecorder, StorageProofKind), +} + +impl MultipleRecorder { + /// Instantiate a recorder of a given type. + pub fn new_recorder(kind: StorageProofKind) -> Self { + match kind { + StorageProofKind::Flat => MultipleRecorder::Flat(Default::default(), D::KIND, PhantomData), + StorageProofKind::Compact => MultipleRecorder::Full(Default::default(), D::KIND), + } + } + + /// Targetted storage proof kind. 
+ pub fn target(&self) -> StorageProofKind { + match self { + MultipleRecorder::Flat(_, k, _) => *k, + MultipleRecorder::Full(_, k) => *k, + } + } +} + +impl Default for MultipleRecorder { + fn default() -> Self { + Self::new_recorder(D::KIND) + } +} + +impl Clone for MultipleRecorder { + fn clone(&self) -> Self { + use MultipleRecorder::{Flat, Full}; + match self { + Flat(data, kind, _) => Flat(data.clone(), *kind, PhantomData), + Full(data, kind) => Full(data.clone(), *kind), + } + } +} + +impl RecordBackend for MultipleRecorder { + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option> { + match self { + MultipleRecorder::Flat(rec, _ ,_) => rec.get(child_info, key), + MultipleRecorder::Full(rec, _) => rec.get(child_info, key), + } + } + + fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option) { + match self { + MultipleRecorder::Flat(rec, _, _) => rec.record(child_info, key, value), + MultipleRecorder::Full(rec, _) => rec.record(child_info, key, value), + } + } + + fn merge(&mut self, other: Self) -> bool { + match self { + MultipleRecorder::Flat(rec, _, _) => { + match other { + MultipleRecorder::Flat(oth, _, _) => { + rec.merge(oth); + true + }, + _ => false + } + }, + MultipleRecorder::Full(rec, _) => { + match other { + MultipleRecorder::Full(oth, _) => { + rec.merge(oth); + true + }, + _ => false, + } + }, + } + } +} + +impl Recordable for MultipleStorageProof + where + H: Hasher, + H::Out: Codec, + D: DefaultKind, +{ + // This could be ignored in case it is knowned that the type is not compact. 
+ const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = MultipleRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + match recorder.target() { + StorageProofKind::Flat => { + if let MultipleRecorder::Flat(rec, _, _) = recorder { + return Ok(MultipleStorageProof::Flat(super::simple::Flat::extract_proof(rec, input)?)) + } + }, + StorageProofKind::Compact => { + if let MultipleRecorder::Full(rec, _) = recorder { + return Ok(MultipleStorageProof::Compact( + super::compact::Flat::extract_proof(rec, input)?, + PhantomData, + )) + } + }, + } + Err(missing_pack_input()) + } +} + +impl BackendProof for MultipleStorageProof + where + H: Hasher, + H::Out: Codec, + D: DefaultKind, +{ + type ProofRaw = super::compact::FullForMerge; + + fn into_partial_db(self) -> Result> { + match self { + MultipleStorageProof::Flat(p) => p.into_partial_db(), + MultipleStorageProof::Compact(p, _) => p.into_partial_db(), + } + } + +} + +impl TryInto for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> Result { + match self { + MultipleStorageProof::Flat(p) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + +impl TryInto>> for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> Result>> { + match self { + MultipleStorageProof::Compact(p, _) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + +impl MultipleStorageProof { + /// Get kind type for the storage proof variant. 
+ pub fn kind(&self) -> StorageProofKind { + match self { + MultipleStorageProof::Flat(_) => StorageProofKind::Flat, + MultipleStorageProof::Compact(_, _) => StorageProofKind::Compact, + } + } +} + +impl Into> for super::compact::FullForMerge + where + H::Out: Codec, +{ + fn into(self) -> MultipleStorageProof { + match D::KIND { + StorageProofKind::Flat => MultipleStorageProof::Flat(self.into()), + StorageProofKind::Compact => MultipleStorageProof::Compact(self.into(), PhantomData), + } + } +} diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs new file mode 100644 index 0000000000000..9912498be608c --- /dev/null +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -0,0 +1,125 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are only verifying state for a given +// key value query plan. + +use super::*; +use super::compact::{ProofCompacted}; +use codec::{Encode, Decode}; +use crate::{TrieConfiguration, TrieHash}; +use sp_std::marker::PhantomData; + + +/// Proof for a known key value content. +/// +/// This skips encoding of hashes in a similar way as `crate::storage_proof::compact`. +/// This also skips values in the proof, and can therefore only be +/// use to check if there was a change of content. +/// This needs to be check for every children proofs, and needs to keep +/// trace of every child trie origin. 
+#[derive(Encode, Decode)] +pub struct KnownQueryPlanAndValues(pub(crate) ChildrenProofMap, PhantomData); + +impl sp_std::fmt::Debug for KnownQueryPlanAndValues { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Known values compact proof: {:?}", &self.0) + } +} + +impl PartialEq> for KnownQueryPlanAndValues { + fn eq(&self, other: &KnownQueryPlanAndValues) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for KnownQueryPlanAndValues { } + +impl Clone for KnownQueryPlanAndValues { + fn clone(&self) -> Self { + KnownQueryPlanAndValues(self.0.clone(), PhantomData) + } +} + +impl Common for KnownQueryPlanAndValues { + fn empty() -> Self { + KnownQueryPlanAndValues(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Recordable for KnownQueryPlanAndValues + where + T: TrieConfiguration, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::QueryPlan; + + type RecordBackend = super::FullRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::QueryPlan(input_children) = input { + let mut result = ChildrenProofMap::default(); + let mut root_hash = TrieHash::::default(); + for (child_info, set) in recorder.0.iter() { + let child_info_proof = child_info.proof_info(); + if let Some((root, keys)) = input_children.get(&child_info_proof) { + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Err(missing_pack_input()); + } + root_hash.as_mut().copy_from_slice(&root[..]); + let trie = trie_db::TrieDB::::new(set, &root_hash)?; + let compacted = trie_db::proof::generate_proof(&trie, keys)?; + result.insert(child_info_proof, compacted); + } else { + return Err(missing_pack_input()); + } + } + Ok(KnownQueryPlanAndValues(result, PhantomData)) + } else { + Err(missing_pack_input()) + } + } +} + +impl Verifiable for KnownQueryPlanAndValues + where + T: TrieConfiguration, + 
TrieHash: Decode, +{ + fn verify(self, input: &Input) -> Result { + if let Input::QueryPlanWithValues(input_children) = input { + let mut root_hash = TrieHash::::default(); + for (child_info, nodes) in self.0.iter() { + if let Some((root, input)) = input_children.get(child_info) { + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Ok(false); + } + root_hash.as_mut().copy_from_slice(&root[..]); + if let Err(_) = trie_db::proof::verify_proof::( + &root_hash, + &nodes[..], + input.iter(), + ) { + return Ok(false); + } + } else { + return Err(missing_verify_input()); + } + } + Ok(true) + } else { + Err(missing_pack_input()) + } + } +} diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs new file mode 100644 index 0000000000000..64db91418be69 --- /dev/null +++ b/primitives/trie/src/storage_proof/simple.rs @@ -0,0 +1,193 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are a simple collection of encoded nodes. + +use super::*; +use codec::{Encode, Decode}; +use sp_storage::ChildInfoProof; + +/// A collection on encoded trie nodes. +pub type ProofNodes = Vec>; + +/// Single flattened proof, all default child trie are flattened over a same +/// container, no child trie information is provided. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct Flat(pub(crate) ProofNodes); + +/// Compacted proof with child trie organisation. +/// +/// This is taking more space than the flat variant.but +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct Full(pub(crate) ChildrenProofMap); + +impl Flat { + /// Access to inner proof node, + /// mainly needed for part of the + /// code that is not generic or test. 
+ pub fn into_nodes(self) -> ProofNodes { + self.0 + } + /// Instantiate from inner proof node, + /// mainly needed for part of the + /// code that is not generic or test. + pub fn from_nodes(nodes: ProofNodes) -> Self { + Flat(nodes) + } +} + +impl Common for Flat { + fn empty() -> Self { + Flat(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Common for Full { + fn empty() -> Self { + Full(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Mergeable for Flat { + fn merge(proofs: I) -> Self where I: IntoIterator { + let mut unique_set = BTreeSet::>::default(); + for proof in proofs { + unique_set.extend(proof.0); + } + Flat(unique_set.into_iter().collect()) + } +} + +impl Mergeable for Full { + fn merge(proofs: I) -> Self where I: IntoIterator { + let mut child_sets = ChildrenProofMap::>>::default(); + for children in proofs { + for (child_info, child) in children.0.into_iter() { + let set = child_sets.entry(child_info).or_default(); + set.extend(child); + } + } + Full(ChildrenProofMap(child_sets + .into_iter() + .map(|(child_info, set)| (child_info, set.into_iter().collect())) + .collect())) + } +} + +impl Recordable for Flat { + const INPUT_KIND: InputKind = InputKind::None; + + type RecordBackend = super::FlatRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { + let trie_nodes = recorder.0 + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + Ok(Flat(trie_nodes)) + } +} + +impl Recordable for Full { + const INPUT_KIND: InputKind = InputKind::None; + + type RecordBackend = super::FullRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.iter() { + let trie_nodes: Vec> = set + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + result.insert(child_info.proof_info(), 
trie_nodes); + } + Ok(Full(result)) + } +} + +impl BackendProof for Flat { + type ProofRaw = Self; + + fn into_partial_db(self) -> Result> { + use hash_db::HashDB; + let mut db = MemoryDB::default(); + for item in self.0.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item[..]); + } + Ok(db) + } +} + +impl BackendProof for Full { + type ProofRaw = Self; + + fn into_partial_db(self) -> Result> { + use hash_db::HashDB; + let mut db = MemoryDB::default(); + for (_child_info, proof) in self.0.into_iter() { + for item in proof.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item); + } + } + + Ok(db) + } +} + +impl FullBackendProof for Full { + fn into_partial_full_db(self) -> Result>> { + use hash_db::HashDB; + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + Ok(result) + } +} + +// Note that this implementation is only possible +// as long as we only have default child trie which +// can be flattened into top trie storage. +impl Into for Full { + fn into(self) -> Flat { + let mut unique_set = BTreeSet::>::default(); + for (child_info, nodes) in self.0 { + assert!(matches!(child_info, ChildInfoProof::Default(..))); + unique_set.extend(nodes); + } + Flat(unique_set.into_iter().collect()) + } +} + +impl Into for Flat { + fn into(self) -> Full { + let mut result = ChildrenProofMap::default(); + result.insert(ChildInfoProof::top_trie(), self.0); + Full(result) + } +} + +#[test] +fn flat_encoding_compatible() { + let nodes = ProofNodes::from([vec![1u8], vec![2u8, 3u8]]); + let flat = Flat::from_nodes(nodes.clone()); + assert_eq!(nodes.encode(), flat.encode()); +}