Skip to content

Commit

Permalink
Fix the non-deterministic storage proof recorded for the same execution (
Browse files Browse the repository at this point in the history
…paritytech#10915)

* Add a test case for the determinism of recorded proof

* Replace HashMap with BTreeMap for the actual proof records

* cargo +nightly fmt --all

* Store the trie nodes in BTreeSet for StorageProof

* Nit

* Revert the BTreeMap changes and sort when converting to storage proof

* Remove PartialEq from StorageProof

* Remove unnecessary change

* Add `compare` method to StorageProof

* FMT

* Dummy change to trigger CI

* Use `BTreeSet` for StorageProof and keep using `Vec` for CompactProof

* Update comment on `iter_nodes`

* Revert `PartialEq` removal
  • Loading branch information
liuchengxu authored and ark0f committed Feb 27, 2023
1 parent caec190 commit 9bb3f29
Show file tree
Hide file tree
Showing 5 changed files with 65 additions and 32 deletions.
5 changes: 3 additions & 2 deletions client/executor/src/native_executor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -462,8 +462,9 @@ impl RuntimeSpawn for RuntimeInstanceSpawn {
// https://github.com/paritytech/substrate/issues/7354
let mut instance = match module.new_instance() {
Ok(instance) => instance,
Err(error) =>
panic!("failed to create new instance from module: {}", error),
Err(error) => {
panic!("failed to create new instance from module: {}", error)
},
};

match instance
Expand Down
10 changes: 6 additions & 4 deletions client/offchain/src/api/http.rs
Original file line number Diff line number Diff line change
Expand Up @@ -432,8 +432,9 @@ impl HttpApi {
);
},
None => {}, // can happen if we detected an IO error when sending the body
_ =>
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
_ => {
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
},
}
},

Expand All @@ -443,8 +444,9 @@ impl HttpApi {
self.requests.insert(id, HttpApiRequest::Fail(error));
},
None => {}, // can happen if we detected an IO error when sending the body
_ =>
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
_ => {
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
},
},

None => {
Expand Down
49 changes: 39 additions & 10 deletions primitives/state-machine/src/proving_backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,15 +158,13 @@ impl<Hash: std::hash::Hash + Eq> ProofRecorder<Hash> {

/// Convert into a [`StorageProof`].
pub fn to_storage_proof(&self) -> StorageProof {
let trie_nodes = self
.inner
.read()
.records
.iter()
.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec()))
.collect();

StorageProof::new(trie_nodes)
StorageProof::new(
self.inner
.read()
.records
.iter()
.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())),
)
}

/// Reset the internal state.
Expand Down Expand Up @@ -393,6 +391,7 @@ mod tests {
proving_backend::create_proof_check_backend, trie_backend::tests::test_trie,
InMemoryBackend,
};
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::PrefixedMemoryDB;

Expand Down Expand Up @@ -426,7 +425,6 @@ mod tests {

#[test]
fn proof_is_invalid_when_does_not_contains_root() {
use sp_core::H256;
let result = create_proof_check_backend::<BlakeTwo256>(
H256::from_low_u64_be(1),
StorageProof::empty(),
Expand Down Expand Up @@ -582,4 +580,35 @@ mod tests {
assert!(backend.storage(b"doesnotexist2").unwrap().is_none());
check_estimation(&backend);
}

// Regression test for the fix in this commit: recording the identical
// sequence of storage changes into two independent recorders must produce
// equal `StorageProof`s. Before the fix the recorder's `HashMap`-backed
// records (see commit notes above) could surface nodes in a different
// order per instance, making the proof non-deterministic.
#[test]
fn proof_recorded_for_same_execution_should_be_deterministic() {
// Eight entries under random keys; random keys make it likely that two
// HashMap instances would iterate them in different orders, which is
// exactly what this test must be insensitive to.
let storage_changes = vec![
(H256::random(), Some(b"value1".to_vec())),
(H256::random(), Some(b"value2".to_vec())),
(H256::random(), Some(b"value3".to_vec())),
(H256::random(), Some(b"value4".to_vec())),
(H256::random(), Some(b"value5".to_vec())),
(H256::random(), Some(b"value6".to_vec())),
(H256::random(), Some(b"value7".to_vec())),
(H256::random(), Some(b"value8".to_vec())),
];

// First recorder: feed every change, then snapshot its storage proof.
let proof_recorder =
ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
storage_changes
.clone()
.into_iter()
.for_each(|(key, val)| proof_recorder.record(key, val));
let proof1 = proof_recorder.to_storage_proof();

// Second, freshly constructed recorder fed the very same changes.
let proof_recorder =
ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
storage_changes
.into_iter()
.for_each(|(key, val)| proof_recorder.record(key, val));
let proof2 = proof_recorder.to_storage_proof();

// Same input, same proof — holds because `StorageProof` now keeps its
// nodes in a `BTreeSet` (see the storage_proof.rs change in this commit),
// so insertion/iteration order of the recorder cannot leak into the proof.
assert_eq!(proof1, proof2);
}
}
31 changes: 16 additions & 15 deletions primitives/trie/src/storage_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
use codec::{Decode, Encode};
use hash_db::{HashDB, Hasher};
use scale_info::TypeInfo;
use sp_std::vec::Vec;
use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec};
// Note that `LayoutV1` usage here (proof compaction) is compatible
// with `LayoutV0`.
use crate::LayoutV1 as Layout;
Expand All @@ -32,36 +32,36 @@ use crate::LayoutV1 as Layout;
/// the serialized nodes and performing the key lookups.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
pub struct StorageProof {
trie_nodes: Vec<Vec<u8>>,
trie_nodes: BTreeSet<Vec<u8>>,
}

impl StorageProof {
/// Constructs a storage proof from a subset of encoded trie nodes in a storage backend.
pub fn new(trie_nodes: Vec<Vec<u8>>) -> Self {
StorageProof { trie_nodes }
pub fn new(trie_nodes: impl IntoIterator<Item = Vec<u8>>) -> Self {
StorageProof { trie_nodes: BTreeSet::from_iter(trie_nodes) }
}

/// Returns a new empty proof.
///
/// An empty proof is capable of only proving trivial statements (ie. that an empty set of
/// key-value pairs exist in storage).
pub fn empty() -> Self {
StorageProof { trie_nodes: Vec::new() }
StorageProof { trie_nodes: BTreeSet::new() }
}

/// Returns whether this is an empty proof.
pub fn is_empty(&self) -> bool {
self.trie_nodes.is_empty()
}

/// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed
/// to be traversed in any particular order.
/// Create an iterator over encoded trie nodes in lexicographical order constructed
/// from the proof.
pub fn iter_nodes(self) -> StorageProofNodeIterator {
StorageProofNodeIterator::new(self)
}

/// Convert into plain node vector.
pub fn into_nodes(self) -> Vec<Vec<u8>> {
pub fn into_nodes(self) -> BTreeSet<Vec<u8>> {
self.trie_nodes
}

Expand Down Expand Up @@ -138,12 +138,13 @@ impl CompactProof {
expected_root,
)?;
Ok((
StorageProof::new(
db.drain()
.into_iter()
.filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None })
.collect(),
),
StorageProof::new(db.drain().into_iter().filter_map(|kv| {
if (kv.1).1 > 0 {
Some((kv.1).0)
} else {
None
}
})),
root,
))
}
Expand Down Expand Up @@ -171,7 +172,7 @@ impl CompactProof {
/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to
/// be traversed in any particular order.
pub struct StorageProofNodeIterator {
inner: <Vec<Vec<u8>> as IntoIterator>::IntoIter,
inner: <BTreeSet<Vec<u8>> as IntoIterator>::IntoIter,
}

impl StorageProofNodeIterator {
Expand Down
2 changes: 1 addition & 1 deletion primitives/trie/src/trie_stream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ use hash_db::Hasher;
use sp_std::vec::Vec;
use trie_root;

#[derive(Default, Clone)]
/// Codec-flavored TrieStream.
#[derive(Default, Clone)]
pub struct TrieStream {
/// Current node buffer.
buffer: Vec<u8>,
Expand Down

0 comments on commit 9bb3f29

Please sign in to comment.