Ensure key ownership proof is optimal (paritytech#4699)
Ensure that the key ownership proof doesn't contain duplicate or
unneeded nodes.

We already have these checks for the bridge messages proof. This change makes
them more generic and applies them to the key ownership proof as well.
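
To illustrate the idea, here is a minimal standalone sketch of the duplicate-node check, using only the Rust standard library. The names (check_no_duplicate_nodes, ProofError) are hypothetical and are not the sp_trie API; in this PR the equivalent check lives in StorageProof::new_with_duplicate_nodes_check.

use std::collections::BTreeSet;

// A raw proof is just a list of encoded trie nodes.
type RawStorageProof = Vec<Vec<u8>>;

#[derive(Debug, PartialEq)]
enum ProofError {
    DuplicateNodes,
}

// A deduplicating container (like the BTreeSet inside StorageProof) silently
// drops repeated nodes, so compare the raw length against the deduplicated one.
fn check_no_duplicate_nodes(proof: &RawStorageProof) -> Result<(), ProofError> {
    let unique: BTreeSet<_> = proof.iter().collect();
    if unique.len() != proof.len() {
        return Err(ProofError::DuplicateNodes);
    }
    Ok(())
}

fn main() {
    let valid: RawStorageProof = vec![vec![1, 2, 3], vec![4, 5, 6]];
    assert_eq!(check_no_duplicate_nodes(&valid), Ok(()));

    // A malicious prover repeats a node to bloat the proof.
    let mut padded = valid.clone();
    padded.push(valid[0].clone());
    assert_eq!(check_no_duplicate_nodes(&padded), Err(ProofError::DuplicateNodes));
}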

---------

Co-authored-by: Adrian Catangiu <adrian@parity.io>
2 people authored and TarekkMA committed Aug 2, 2024
1 parent 437f1a1 commit 7bfaeb3
Showing 8 changed files with 366 additions and 144 deletions.
7 changes: 5 additions & 2 deletions bridges/bin/runtime-common/src/messages.rs
@@ -395,6 +395,7 @@ mod tests {
use codec::Encode;
use sp_core::H256;
use sp_runtime::traits::Header as _;
use sp_trie::accessed_nodes_tracker::Error as AccessedNodesTrackerError;

#[test]
fn verify_chain_message_rejects_message_with_too_large_declared_weight() {
@@ -541,7 +542,7 @@ mod tests {
target::verify_messages_proof::<OnThisChainBridge>(proof, 10)
},),
Err(VerificationError::HeaderChain(HeaderChainError::StorageProof(
StorageProofError::DuplicateNodesInProof
StorageProofError::StorageProof(sp_trie::StorageProofError::DuplicateNodes.into())
))),
);
}
@@ -553,7 +554,9 @@ mod tests {
proof.storage_proof.push(vec![42]);
target::verify_messages_proof::<OnThisChainBridge>(proof, 10)
},),
Err(VerificationError::StorageProof(StorageProofError::UnusedNodesInTheProof)),
Err(VerificationError::StorageProof(StorageProofError::AccessedNodesTracker(
AccessedNodesTrackerError::UnusedNodes.into()
))),
);
}

97 changes: 34 additions & 63 deletions bridges/primitives/runtime/src/storage_proof.rs
@@ -21,15 +21,16 @@ use codec::{Decode, Encode};
use frame_support::PalletError;
use hash_db::{HashDB, Hasher, EMPTY_PREFIX};
use scale_info::TypeInfo;
use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec};
use sp_std::{boxed::Box, vec::Vec};
pub use sp_trie::RawStorageProof;
use sp_trie::{
read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration,
accessed_nodes_tracker::{AccessedNodesTracker, Error as AccessedNodesTrackerError},
read_trie_value,
recorder_ext::RecorderExt,
LayoutV1, MemoryDB, Recorder, StorageProof, StorageProofError, Trie, TrieConfiguration,
TrieDBBuilder, TrieError, TrieHash,
};

/// Raw storage proof type (just raw trie nodes).
pub type RawStorageProof = Vec<Vec<u8>>;

/// Storage proof size requirements.
///
/// This is currently used by benchmarks when generating storage proofs.
@@ -51,10 +52,9 @@ pub struct StorageProofChecker<H>
where
H: Hasher,
{
proof_nodes_count: usize,
root: H::Out,
db: MemoryDB<H>,
recorder: Recorder<LayoutV1<H>>,
accessed_nodes_tracker: AccessedNodesTracker<H::Out>,
}

impl<H> StorageProofChecker<H>
@@ -65,52 +65,39 @@ where
///
/// This returns an error if the given proof is invalid with respect to the given root.
pub fn new(root: H::Out, proof: RawStorageProof) -> Result<Self, Error> {
// 1. we don't want extra items in the storage proof
// 2. `StorageProof` is storing all trie nodes in the `BTreeSet`
//
// => someone could simply add duplicate items to the proof and we won't be
// able to detect that by just using `StorageProof`
//
// => let's check it when we are converting our "raw proof" into `StorageProof`
let proof_nodes_count = proof.len();
let proof = StorageProof::new(proof);
if proof_nodes_count != proof.iter_nodes().count() {
return Err(Error::DuplicateNodesInProof)
}
let proof = StorageProof::new_with_duplicate_nodes_check(proof)
.map_err(|e| Error::StorageProof(e.into()))?;

let recorder = AccessedNodesTracker::new(proof.len());

let db = proof.into_memory_db();
if !db.contains(&root, EMPTY_PREFIX) {
return Err(Error::StorageRootMismatch)
}

let recorder = Recorder::default();
let checker = StorageProofChecker { proof_nodes_count, root, db, recorder };
Ok(checker)
Ok(StorageProofChecker { root, db, accessed_nodes_tracker: recorder })
}

/// Returns error if the proof has some nodes that are left intact by previous `read_value`
/// calls.
pub fn ensure_no_unused_nodes(mut self) -> Result<(), Error> {
let visited_nodes = self
.recorder
.drain()
.into_iter()
.map(|record| record.data)
.collect::<BTreeSet<_>>();
let visited_nodes_count = visited_nodes.len();
if self.proof_nodes_count == visited_nodes_count {
Ok(())
} else {
Err(Error::UnusedNodesInTheProof)
}
pub fn ensure_no_unused_nodes(self) -> Result<(), Error> {
self.accessed_nodes_tracker
.ensure_no_unused_nodes()
.map_err(|e| Error::AccessedNodesTracker(e.into()))
}

/// Reads a value from the available subset of storage. If the value cannot be read due to an
/// incomplete or otherwise invalid proof, this function returns an error.
pub fn read_value(&mut self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
// LayoutV1 or LayoutV0 is identical for proof that only read values.
read_trie_value::<LayoutV1<H>, _>(&self.db, &self.root, key, Some(&mut self.recorder), None)
.map_err(|_| Error::StorageValueUnavailable)
read_trie_value::<LayoutV1<H>, _>(
&self.db,
&self.root,
key,
Some(&mut self.accessed_nodes_tracker),
None,
)
.map_err(|_| Error::StorageValueUnavailable)
}

/// Reads and decodes a value from the available subset of storage. If the value cannot be read
@@ -145,10 +132,10 @@ where
/// Storage proof related errors.
#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, Debug, TypeInfo)]
pub enum Error {
/// Duplicate trie nodes are found in the proof.
DuplicateNodesInProof,
/// Unused trie nodes are found in the proof.
UnusedNodesInTheProof,
/// Error generated by the `AccessedNodesTrackerError`.
AccessedNodesTracker(StrippableError<AccessedNodesTrackerError>),
/// Error originating in the `storage_proof` module.
StorageProof(StrippableError<StorageProofError>),
/// Expected storage root is missing from the proof.
StorageRootMismatch,
/// Unable to reach expected storage value using provided trie nodes.
@@ -202,15 +189,7 @@ where
trie.get(&key)?;
}

// recorder may record the same trie node multiple times and we don't want duplicate nodes
// in our proofs => let's deduplicate it by collecting to the BTreeSet first
Ok(recorder
.drain()
.into_iter()
.map(|n| n.data.to_vec())
.collect::<BTreeSet<_>>()
.into_iter()
.collect())
Ok(recorder.into_raw_storage_proof())
}

#[cfg(test)]
@@ -243,30 +222,22 @@ pub mod tests {
);
}

#[test]
fn proof_with_duplicate_items_is_rejected() {
let (root, mut proof) = craft_valid_storage_proof();
proof.push(proof.first().unwrap().clone());

assert_eq!(
StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof).map(drop),
Err(Error::DuplicateNodesInProof),
);
}

#[test]
fn proof_with_unused_items_is_rejected() {
let (root, proof) = craft_valid_storage_proof();

let mut checker =
StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof.clone()).unwrap();
checker.read_value(b"key1").unwrap();
checker.read_value(b"key1").unwrap().unwrap();
checker.read_value(b"key2").unwrap();
checker.read_value(b"key4").unwrap();
checker.read_value(b"key22").unwrap();
assert_eq!(checker.ensure_no_unused_nodes(), Ok(()));

let checker = StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof).unwrap();
assert_eq!(checker.ensure_no_unused_nodes(), Err(Error::UnusedNodesInTheProof));
assert_eq!(
checker.ensure_no_unused_nodes(),
Err(Error::AccessedNodesTracker(AccessedNodesTrackerError::UnusedNodes.into()))
);
}
}
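
The unused-node check exercised by the test above relies on a simple counting idea: the verifier records every node it touches while reading values and, once all reads are done, compares that count with the number of nodes supplied in the proof. Below is a minimal standalone sketch of that principle; AccessTracker and its methods are hypothetical names, not the actual sp_trie AccessedNodesTracker API.

use std::collections::BTreeSet;

// Tracks which proof nodes were touched during verification.
struct AccessTracker {
    proof_size: usize,
    accessed: BTreeSet<Vec<u8>>,
}

#[derive(Debug, PartialEq)]
enum TrackerError {
    UnusedNodes,
}

impl AccessTracker {
    fn new(proof_size: usize) -> Self {
        Self { proof_size, accessed: BTreeSet::new() }
    }

    // Called whenever a trie lookup reads a node from the proof.
    fn on_node_accessed(&mut self, node: &[u8]) {
        self.accessed.insert(node.to_vec());
    }

    // After all reads, every node supplied in the proof must have been used.
    fn ensure_no_unused_nodes(&self) -> Result<(), TrackerError> {
        if self.accessed.len() == self.proof_size {
            Ok(())
        } else {
            Err(TrackerError::UnusedNodes)
        }
    }
}

fn main() {
    let proof: Vec<Vec<u8>> = vec![vec![1], vec![2], vec![3]];
    let mut tracker = AccessTracker::new(proof.len());

    // Lookups that only touch the first two nodes leave one node unused.
    tracker.on_node_accessed(&proof[0]);
    tracker.on_node_accessed(&proof[1]);
    assert_eq!(tracker.ensure_no_unused_nodes(), Err(TrackerError::UnusedNodes));

    tracker.on_node_accessed(&proof[2]);
    assert_eq!(tracker.ensure_no_unused_nodes(), Ok(()));
}
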
112 changes: 55 additions & 57 deletions substrate/frame/session/src/historical/mod.rs
@@ -37,21 +37,22 @@ use sp_runtime::{
};
use sp_session::{MembershipProof, ValidatorCount};
use sp_staking::SessionIndex;
use sp_std::prelude::*;
use sp_std::{fmt::Debug, prelude::*};
use sp_trie::{
trie_types::{TrieDBBuilder, TrieDBMutBuilderV0},
LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX,
LayoutV0, MemoryDB, Recorder, StorageProof, Trie, TrieMut, TrieRecorder,
};

use frame_support::{
print,
traits::{KeyOwnerProofSystem, ValidatorSet, ValidatorSetWithIdentification},
Parameter,
Parameter, LOG_TARGET,
};

use crate::{self as pallet_session, Pallet as Session};

pub use pallet::*;
use sp_trie::{accessed_nodes_tracker::AccessedNodesTracker, recorder_ext::RecorderExt};

#[frame_support::pallet]
pub mod pallet {
@@ -118,6 +119,16 @@ impl<T: Config> Pallet<T> {
}
})
}

fn full_id_validators() -> Vec<(T::ValidatorId, T::FullIdentification)> {
<Session<T>>::validators()
.into_iter()
.filter_map(|validator| {
T::FullIdentificationOf::convert(validator.clone())
.map(|full_id| (validator, full_id))
})
.collect::<Vec<_>>()
}
}

impl<T: Config> ValidatorSet<T::AccountId> for Pallet<T> {
@@ -264,46 +275,34 @@ impl<T: Config> ProvingTrie<T> {
Ok(ProvingTrie { db, root })
}

fn from_nodes(root: T::Hash, nodes: &[Vec<u8>]) -> Self {
use sp_trie::HashDBT;

let mut memory_db = MemoryDB::default();
for node in nodes {
HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]);
}

ProvingTrie { db: memory_db, root }
fn from_proof(root: T::Hash, proof: StorageProof) -> Self {
ProvingTrie { db: proof.into_memory_db(), root }
}

/// Prove the full verification data for a given key and key ID.
pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option<Vec<Vec<u8>>> {
let mut recorder = Recorder::<LayoutV0<T::Hashing>>::new();
{
let trie =
TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build();
let val_idx = (key_id, key_data).using_encoded(|s| {
trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok())
})?;

val_idx.using_encoded(|s| {
trie.get(s)
.ok()?
.and_then(|raw| <IdentificationTuple<T>>::decode(&mut &*raw).ok())
})?;
}
self.query(key_id, key_data, Some(&mut recorder));

Some(recorder.drain().into_iter().map(|r| r.data).collect())
Some(recorder.into_raw_storage_proof())
}

/// Access the underlying trie root.
pub fn root(&self) -> &T::Hash {
&self.root
}

// Check a proof contained within the current memory-db. Returns `None` if the
// nodes within the current `MemoryDB` are insufficient to query the item.
fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option<IdentificationTuple<T>> {
let trie = TrieDBBuilder::new(&self.db, &self.root).build();
/// Search for a key inside the proof.
fn query(
&self,
key_id: KeyTypeId,
key_data: &[u8],
recorder: Option<&mut dyn TrieRecorder<T::Hash>>,
) -> Option<IdentificationTuple<T>> {
let trie = TrieDBBuilder::new(&self.db, &self.root)
.with_optional_recorder(recorder)
.build();

let val_idx = (key_id, key_data)
.using_encoded(|s| trie.get(s))
.ok()?
@@ -322,13 +321,7 @@ impl<T: Config, D: AsRef<[u8]>> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet<T

fn prove(key: (KeyTypeId, D)) -> Option<Self::Proof> {
let session = <Session<T>>::current_index();
let validators = <Session<T>>::validators()
.into_iter()
.filter_map(|validator| {
T::FullIdentificationOf::convert(validator.clone())
.map(|full_id| (validator, full_id))
})
.collect::<Vec<_>>();
let validators = Self::full_id_validators();

let count = validators.len() as ValidatorCount;

@@ -343,30 +336,35 @@ impl<T: Config, D: AsRef<[u8]>> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet<T
}

fn check_proof(key: (KeyTypeId, D), proof: Self::Proof) -> Option<IdentificationTuple<T>> {
let (id, data) = key;

if proof.session == <Session<T>>::current_index() {
<Session<T>>::key_owner(id, data.as_ref()).and_then(|owner| {
T::FullIdentificationOf::convert(owner.clone()).and_then(move |id| {
let count = <Session<T>>::validators().len() as ValidatorCount;

if count != proof.validator_count {
return None
}
fn print_error<E: Debug>(e: E) {
log::error!(
target: LOG_TARGET,
"Rejecting equivocation report because of key ownership proof error: {:?}", e
);
}

Some((owner, id))
})
})
let (id, data) = key;
let (root, count) = if proof.session == <Session<T>>::current_index() {
let validators = Self::full_id_validators();
let count = validators.len() as ValidatorCount;
let trie = ProvingTrie::<T>::generate_for(validators).ok()?;
(trie.root, count)
} else {
let (root, count) = <HistoricalSessions<T>>::get(&proof.session)?;

if count != proof.validator_count {
return None
}
<HistoricalSessions<T>>::get(&proof.session)?
};

let trie = ProvingTrie::<T>::from_nodes(root, &proof.trie_nodes);
trie.query(id, data.as_ref())
if count != proof.validator_count {
return None
}

let proof = StorageProof::new_with_duplicate_nodes_check(proof.trie_nodes)
.map_err(print_error)
.ok()?;
let mut accessed_nodes_tracker = AccessedNodesTracker::<T::Hash>::new(proof.len());
let trie = ProvingTrie::<T>::from_proof(root, proof);
let res = trie.query(id, data.as_ref(), Some(&mut accessed_nodes_tracker))?;
accessed_nodes_tracker.ensure_no_unused_nodes().map_err(print_error).ok()?;
Some(res)
}
}
