This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

add additional assurances to create_inherent (#4349)
* minor: move checks into separate fn

* add additional validity checks

* simplify shuffling

* Closes potential OOB weight

* improve docs

* fooo

* remove obsolete comment

* move filtering into the rollback-transaction

Technically this is not necessary but avoids future footguns.

* move check up and avoid duplicate checks

* refactor: make sure backed candidates are sane, even more

* doc wording

Co-authored-by: Zeke Mostov <z.mostov@gmail.com>

* refactor: avoid const generics for sake of wasm size

`true` -> `FullCheck::Yes`, `false` -> `FullCheck::Skip`.

* chore: unify `CandidateCheckContext` instance names

* refactor: introduce `IndexedRetain` for `Vec<T>` (see the sketch after this commit message)

* chore: make tests prefix free

* doc: re-introduce removed comment

* refactor: remove another const generic to save some wasm size

Co-authored-by: Zeke Mostov <z.mostov@gmail.com>
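
One of the items above introduces an `IndexedRetain` helper for `Vec<T>`. As editorial context, here is a minimal, hypothetical sketch of what such an index-aware `retain` can look like; the trait name and exact signature used in the commit may differ:

```rust
/// Hypothetical sketch of an index-aware `retain` for `Vec<T>`;
/// the trait in the actual commit may be shaped differently.
trait IndexedRetain<T> {
    /// Keep only the elements for which `f(index, element)` returns `true`,
    /// preserving the original order.
    fn indexed_retain(&mut self, f: impl FnMut(usize, &T) -> bool);
}

impl<T> IndexedRetain<T> for Vec<T> {
    fn indexed_retain(&mut self, mut f: impl FnMut(usize, &T) -> bool) {
        let mut idx = 0_usize;
        // `Vec::retain` visits elements exactly once in the original order,
        // so a running counter reproduces each element's index.
        self.retain(|item| {
            let keep = f(idx, item);
            idx += 1;
            keep
        });
    }
}

fn main() {
    let mut backed = vec!["a", "b", "c", "d"];
    // Keep only the elements at even indices.
    backed.indexed_retain(|idx, _item| idx % 2 == 0);
    assert_eq!(backed, vec!["a", "c"]);
}
```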
2 people authored and ordian committed Nov 26, 2021
1 parent a4fb3d6 commit 072019b
Showing 3 changed files with 446 additions and 319 deletions.
5 changes: 3 additions & 2 deletions roadmap/implementers-guide/src/runtime/inclusion.md
@@ -51,21 +51,22 @@ All failed checks should lead to an unrecoverable error making the block invalid
1. For each applied bit of each availability-bitfield, set the bit for the validator in the `CandidatePendingAvailability`'s `availability_votes` bitfield. Track all candidates that now have >2/3 of bits set in their `availability_votes`. These candidates are now available and can be enacted.
1. For all now-available candidates, invoke the `enact_candidate` routine with the candidate and relay-parent number.
1. Return a list of `(CoreIndex, CandidateHash)` from freed cores consisting of the cores where candidates have become available.
* `sanitize_bitfields<T: crate::inclusion::Config, const CHECK_SIGS: bool>(
* `sanitize_bitfields<T: crate::inclusion::Config>(
unchecked_bitfields: UncheckedSignedAvailabilityBitfields,
disputed_bitfield: DisputedBitfield,
expected_bits: usize,
parent_hash: T::Hash,
session_index: SessionIndex,
validators: &[ValidatorId],
full_check: FullCheck,
)`:
1. check that `disputed_bitfield` has the same number of bits as `expected_bits`; if not, return early with an empty vec.
1. each of the following checks is applied to each bitfield; if a check does not pass, the bitfield is skipped.
1. check that there are no bits set that reference a disputed candidate.
1. check that the number of bits is equal to `expected_bits`.
1. check that the validator index is strictly increasing (and thus also unique).
1. check that the validator bit index is not out of bounds.
1. check the validator's signature, iff `CHECK_SIGS=true`.
1. check the validator's signature, iff `full_check=FullCheck::Yes`.

* `sanitize_backed_candidates<T: crate::inclusion::Config, F: Fn(CandidateHash) -> bool>(
relay_parent: T::Hash,
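The `sanitize_bitfields` checks enumerated in the guide section above can be summarized in a small editorial sketch. This is not the runtime's implementation — the types, field names, and function name below are simplified assumptions (the real code works on signed bitfields, a `DisputedBitfield`, and validator keys) — but it follows the same filtering logic, including the `FullCheck` distinction:

```rust
// Editorial sketch only: simplified stand-ins for the runtime types.
// `Bitfield`, `signature_valid`, and `sanitize_bitfields_sketch` are
// illustrative names, not the actual runtime API.
struct Bitfield {
    validator_index: usize,
    bits: Vec<bool>,       // one availability bit per scheduled core
    signature_valid: bool, // stands in for the real signature verification
}

#[derive(PartialEq)]
enum FullCheck {
    Yes,
    Skip,
}

/// Keep only the bitfields that pass the checks listed in the guide:
/// no votes for disputed candidates, correct bit count, strictly
/// increasing validator indices, an in-bounds validator index, and a
/// valid signature iff a full check is requested.
fn sanitize_bitfields_sketch(
    bitfields: Vec<Bitfield>,
    disputed: &[bool], // per core: `true` if the occupying candidate is disputed
    expected_bits: usize,
    n_validators: usize,
    full_check: FullCheck,
) -> Vec<Bitfield> {
    // Mirror of the early return: the disputed set must cover `expected_bits` cores.
    if disputed.len() != expected_bits {
        return Vec::new();
    }
    let mut last_index: Option<usize> = None;
    let mut checked = Vec::new();
    for bitfield in bitfields {
        let votes_for_disputed = bitfield
            .bits
            .iter()
            .zip(disputed.iter())
            .any(|(bit, is_disputed)| *bit && *is_disputed);
        let strictly_increasing =
            last_index.map_or(true, |last| bitfield.validator_index > last);
        let keep = !votes_for_disputed
            && bitfield.bits.len() == expected_bits
            && strictly_increasing
            && bitfield.validator_index < n_validators
            && (full_check == FullCheck::Skip || bitfield.signature_valid);
        if keep {
            last_index = Some(bitfield.validator_index);
            checked.push(bitfield);
        }
    }
    checked
}

fn main() {
    let bitfields = vec![
        Bitfield { validator_index: 0, bits: vec![true, false], signature_valid: true },
        Bitfield { validator_index: 0, bits: vec![false, false], signature_valid: true }, // index not increasing
        Bitfield { validator_index: 3, bits: vec![false, true], signature_valid: true },  // votes for a disputed core
    ];
    let checked = sanitize_bitfields_sketch(bitfields, &[false, true], 2, 4, FullCheck::Yes);
    assert_eq!(checked.len(), 1);
}
```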
173 changes: 116 additions & 57 deletions runtime/parachains/src/inclusion.rs
@@ -22,7 +22,7 @@

use crate::{
configuration, disputes, dmp, hrmp, paras,
paras_inherent::{sanitize_bitfields, DisputedBitfield, VERIFY_SIGS},
paras_inherent::{sanitize_bitfields, DisputedBitfield},
scheduler::CoreAssignment,
shared, ump,
};
@@ -56,6 +56,19 @@ pub struct AvailabilityBitfieldRecord<N> {
submitted_at: N, // for accounting, as meaning of bits may change over time.
}

/// Determines if all checks should be applied or if a subset was already completed
/// in a code path that will be executed afterwards or was already executed before.
#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
pub(crate) enum FullCheck {
/// Yes, do a full check, skip nothing.
Yes,
/// Skip a subset of checks that are already completed before.
///
/// Attention: Should only be used when absolutely sure that the required
/// checks are completed before.
Skip,
}

/// A backed candidate pending availability.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
#[cfg_attr(test, derive(Debug, Default))]
@@ -403,13 +416,14 @@ impl<T: Config> Pallet<T> {
let session_index = shared::Pallet::<T>::session_index();
let parent_hash = frame_system::Pallet::<T>::parent_hash();

let checked_bitfields = sanitize_bitfields::<T, VERIFY_SIGS>(
let checked_bitfields = sanitize_bitfields::<T>(
signed_bitfields,
disputed_bitfield,
expected_bits,
parent_hash,
session_index,
&validators[..],
FullCheck::Yes,
);

let freed_cores = Self::update_pending_availability_and_get_freed_cores::<_, true>(
@@ -427,12 +441,16 @@ impl<T: Config> Pallet<T> {
///
/// Both should be sorted ascending by core index, and the candidates should be a subset of
/// scheduled cores. If these conditions are not met, the execution of the function fails.
pub(crate) fn process_candidates(
pub(crate) fn process_candidates<GV>(
parent_storage_root: T::Hash,
candidates: Vec<BackedCandidate<T::Hash>>,
scheduled: Vec<CoreAssignment>,
group_validators: impl Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
) -> Result<ProcessedCandidates<T::Hash>, DispatchError> {
group_validators: GV,
full_check: FullCheck,
) -> Result<ProcessedCandidates<T::Hash>, DispatchError>
where
GV: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
{
ensure!(candidates.len() <= scheduled.len(), Error::<T>::UnscheduledCandidate);

if scheduled.is_empty() {
Expand All @@ -446,7 +464,7 @@ impl<T: Config> Pallet<T> {
// before of the block where we include a candidate (i.e. this code path).
let now = <frame_system::Pallet<T>>::block_number();
let relay_parent_number = now - One::one();
let check_cx = CandidateCheckContext::<T>::new(now, relay_parent_number);
let check_ctx = CandidateCheckContext::<T>::new(now, relay_parent_number);

// Collect candidate receipts with backers.
let mut candidate_receipt_with_backing_validator_indices =
@@ -481,54 +499,20 @@ impl<T: Config> Pallet<T> {
//
// In the meantime, we do certain sanity checks on the candidates and on the scheduled
// list.
'a: for (candidate_idx, backed_candidate) in candidates.iter().enumerate() {
'next_backed_candidate: for (candidate_idx, backed_candidate) in
candidates.iter().enumerate()
{
if let FullCheck::Yes = full_check {
check_ctx.verify_backed_candidate(
parent_hash,
candidate_idx,
backed_candidate,
)?;
}

let para_id = backed_candidate.descriptor().para_id;
let mut backers = bitvec::bitvec![BitOrderLsb0, u8; 0; validators.len()];

// we require that the candidate is in the context of the parent block.
ensure!(
backed_candidate.descriptor().relay_parent == parent_hash,
Error::<T>::CandidateNotInParentContext,
);
ensure!(
backed_candidate.descriptor().check_collator_signature().is_ok(),
Error::<T>::NotCollatorSigned,
);

let validation_code_hash =
<paras::Pallet<T>>::validation_code_hash_at(para_id, now, None)
// A candidate for a parachain without current validation code is not scheduled.
.ok_or_else(|| Error::<T>::UnscheduledCandidate)?;
ensure!(
backed_candidate.descriptor().validation_code_hash == validation_code_hash,
Error::<T>::InvalidValidationCodeHash,
);

ensure!(
backed_candidate.descriptor().para_head ==
backed_candidate.candidate.commitments.head_data.hash(),
Error::<T>::ParaHeadMismatch,
);

if let Err(err) = check_cx.check_validation_outputs(
para_id,
&backed_candidate.candidate.commitments.head_data,
&backed_candidate.candidate.commitments.new_validation_code,
backed_candidate.candidate.commitments.processed_downward_messages,
&backed_candidate.candidate.commitments.upward_messages,
T::BlockNumber::from(backed_candidate.candidate.commitments.hrmp_watermark),
&backed_candidate.candidate.commitments.horizontal_messages,
) {
log::debug!(
target: LOG_TARGET,
"Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed: {:?}",
candidate_idx,
u32::from(para_id),
err,
);
Err(err.strip_into_dispatch_err::<T>())?;
};

for (i, assignment) in scheduled[skip..].iter().enumerate() {
check_assignment_in_order(assignment)?;

@@ -631,7 +615,7 @@ impl<T: Config> Pallet<T> {
backers,
assignment.group_idx,
));
continue 'a
continue 'next_backed_candidate
}
}

@@ -682,7 +666,7 @@ impl<T: Config> Pallet<T> {
availability_votes,
relay_parent_number,
backers: backers.to_bitvec(),
backed_in_number: check_cx.now,
backed_in_number: check_ctx.now,
backing_group: group,
},
);
@@ -704,9 +688,9 @@ impl<T: Config> Pallet<T> {
// `relay_parent_number` is equal to `now`.
let now = <frame_system::Pallet<T>>::block_number();
let relay_parent_number = now;
let check_cx = CandidateCheckContext::<T>::new(now, relay_parent_number);
let check_ctx = CandidateCheckContext::<T>::new(now, relay_parent_number);

if let Err(err) = check_cx.check_validation_outputs(
if let Err(err) = check_ctx.check_validation_outputs(
para_id,
&validation_outputs.head_data,
&validation_outputs.new_validation_code,
@@ -941,17 +925,78 @@ impl<BlockNumber> AcceptanceCheckErr<BlockNumber> {
}

/// A collection of data required for checking a candidate.
struct CandidateCheckContext<T: Config> {
pub(crate) struct CandidateCheckContext<T: Config> {
config: configuration::HostConfiguration<T::BlockNumber>,
now: T::BlockNumber,
relay_parent_number: T::BlockNumber,
}

impl<T: Config> CandidateCheckContext<T> {
fn new(now: T::BlockNumber, relay_parent_number: T::BlockNumber) -> Self {
pub(crate) fn new(now: T::BlockNumber, relay_parent_number: T::BlockNumber) -> Self {
Self { config: <configuration::Pallet<T>>::config(), now, relay_parent_number }
}

/// Execute verification of the candidate.
///
/// Assures:
/// * correct expected relay parent reference
/// * collator signature check passes
/// * code hash of commitments matches current code hash
/// * para head in the descriptor and commitments match
pub(crate) fn verify_backed_candidate(
&self,
parent_hash: <T as frame_system::Config>::Hash,
candidate_idx: usize,
backed_candidate: &BackedCandidate<<T as frame_system::Config>::Hash>,
) -> Result<(), Error<T>> {
let para_id = backed_candidate.descriptor().para_id;
let now = self.now;

// we require that the candidate is in the context of the parent block.
ensure!(
backed_candidate.descriptor().relay_parent == parent_hash,
Error::<T>::CandidateNotInParentContext,
);
ensure!(
backed_candidate.descriptor().check_collator_signature().is_ok(),
Error::<T>::NotCollatorSigned,
);

let validation_code_hash = <paras::Pallet<T>>::validation_code_hash_at(para_id, now, None)
// A candidate for a parachain without current validation code is not scheduled.
.ok_or_else(|| Error::<T>::UnscheduledCandidate)?;
ensure!(
backed_candidate.descriptor().validation_code_hash == validation_code_hash,
Error::<T>::InvalidValidationCodeHash,
);

ensure!(
backed_candidate.descriptor().para_head ==
backed_candidate.candidate.commitments.head_data.hash(),
Error::<T>::ParaHeadMismatch,
);

if let Err(err) = self.check_validation_outputs(
para_id,
&backed_candidate.candidate.commitments.head_data,
&backed_candidate.candidate.commitments.new_validation_code,
backed_candidate.candidate.commitments.processed_downward_messages,
&backed_candidate.candidate.commitments.upward_messages,
T::BlockNumber::from(backed_candidate.candidate.commitments.hrmp_watermark),
&backed_candidate.candidate.commitments.horizontal_messages,
) {
log::debug!(
target: LOG_TARGET,
"Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed: {:?}",
candidate_idx,
u32::from(para_id),
err,
);
Err(err.strip_into_dispatch_err::<T>())?;
};
Ok(())
}

/// Check the given outputs after candidate validation on whether it passes the acceptance
/// criteria.
fn check_validation_outputs(
@@ -1935,6 +1980,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_b_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::UnscheduledCandidate
);
@@ -1990,6 +2036,7 @@ pub(crate) mod tests {
vec![backed_b, backed_a],
vec![chain_a_assignment.clone(), chain_b_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::UnscheduledCandidate
);
@@ -2023,6 +2070,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::InsufficientBacking
);
@@ -2058,6 +2106,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::CandidateNotInParentContext
);
@@ -2097,6 +2146,7 @@ pub(crate) mod tests {
thread_a_assignment.clone(),
],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::WrongCollator,
);
@@ -2135,6 +2185,7 @@ pub(crate) mod tests {
vec![backed],
vec![thread_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::NotCollatorSigned
);
@@ -2185,6 +2236,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::CandidateScheduledBeforeParaFree
);
@@ -2228,6 +2280,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::CandidateScheduledBeforeParaFree
);
@@ -2279,6 +2332,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::PrematureCodeUpgrade
);
@@ -2313,6 +2367,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Err(Error::<Test>::ValidationDataHashMismatch.into()),
);
@@ -2348,6 +2403,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::InvalidValidationCodeHash
);
@@ -2383,6 +2439,7 @@ pub(crate) mod tests {
vec![backed],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
),
Error::<Test>::ParaHeadMismatch
);
@@ -2552,6 +2609,7 @@ pub(crate) mod tests {
thread_a_assignment.clone(),
],
&group_validators,
FullCheck::Yes,
)
.expect("candidates scheduled, in order, and backed");

@@ -2742,6 +2800,7 @@ pub(crate) mod tests {
vec![backed_a],
vec![chain_a_assignment.clone()],
&group_validators,
FullCheck::Yes,
)
.expect("candidates scheduled, in order, and backed");
