
CandidateDescriptor: disable collator signature and collator id usage #4665

Merged: 15 commits, Jul 25, 2024
@@ -160,8 +160,6 @@ impl CandidateStorage {
state,
candidate: Arc::new(ProspectiveCandidate {
commitments: candidate.commitments,
collator: candidate.descriptor.collator,
collator_signature: candidate.descriptor.signature,
persisted_validation_data,
pov_hash: candidate.descriptor.pov_hash,
validation_code_hash: candidate.descriptor.validation_code_hash,
26 changes: 5 additions & 21 deletions polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -117,9 +117,8 @@
/// That means a few blocks of execution time lost, which is not a big deal for code upgrades
/// in practice at most once every few weeks.
use polkadot_primitives::{
async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData,
UpgradeRestriction, ValidationCodeHash,
async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, Hash,
HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash,
};
use std::{collections::HashMap, sync::Arc};

@@ -521,18 +520,13 @@ impl ConstraintModifications {
/// The prospective candidate.
///
/// This comprises the key information that represent a candidate
/// without pinning it to a particular session. For example, everything
/// to do with the collator's signature and commitments are represented
/// here. But the erasure-root is not. This means that prospective candidates
/// without pinning it to a particular session. For example commitments are
/// represented here. But the erasure-root is not. This means that prospective candidates
/// are not correlated to any session in particular.
#[derive(Debug, Clone, PartialEq)]
pub struct ProspectiveCandidate {
/// The commitments to the output of the execution.
pub commitments: CandidateCommitments,
/// The collator that created the candidate.
pub collator: CollatorId,
/// The signature of the collator on the payload.
pub collator_signature: CollatorSignature,
/// The persisted validation data used to create the candidate.
pub persisted_validation_data: PersistedValidationData,
/// The hash of the PoV.
@@ -800,10 +794,7 @@ fn validate_against_constraints(
#[cfg(test)]
mod tests {
use super::*;
use polkadot_primitives::{
CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode,
};
use sp_application_crypto::Pair;
use polkadot_primitives::{HorizontalMessages, OutboundHrmpMessage, ValidationCode};

#[test]
fn stack_modifications() {
@@ -1162,11 +1153,6 @@ mod tests {
constraints: &Constraints,
relay_parent: &RelayChainBlockInfo,
) -> ProspectiveCandidate {
let collator_pair = CollatorPair::generate().0;
let collator = collator_pair.public();

let sig = collator_pair.sign(b"blabla".as_slice());

ProspectiveCandidate {
commitments: CandidateCommitments {
upward_messages: Default::default(),
@@ -1176,8 +1162,6 @@
processed_downward_messages: 0,
hrmp_watermark: relay_parent.number,
},
collator,
collator_signature: sig,
persisted_validation_data: PersistedValidationData {
parent_head: constraints.required_parent.clone(),
relay_parent_number: relay_parent.number,
6 changes: 5 additions & 1 deletion polkadot/primitives/src/v7/mod.rs
@@ -2040,10 +2040,14 @@ pub mod node_features {
/// Must not be enabled unless all validators and collators have stopped using `req_chunk`
/// protocol version 1. If it is enabled, validators can start systematic chunk recovery.
AvailabilityChunkMapping = 2,
/// Enables node side support of `CoreIndex` committed candidate receipts.
/// See [RFC-103](https://github.com/polkadot-fellows/RFCs/pull/103) for details.
/// Only enable if at least 2/3 of nodes support the feature.
CandidateReceiptV2 = 3,
/// First unassigned feature bit.
/// Every time a new feature flag is assigned it should take this value.
/// and this should be incremented.
FirstUnassigned = 3,
FirstUnassigned = 4,
}
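
For reference, a node-side consumer could gate behaviour on this new bit roughly as in the sketch below. This is not code from this PR: it assumes the `NodeFeatures` bitvec type from `polkadot-primitives`, and the helper name `supports_v2_receipts` is illustrative only.

```rust
use polkadot_primitives::{node_features::FeatureIndex, NodeFeatures};

/// Hypothetical helper (not part of this PR): true if the session's node
/// features have the `CandidateReceiptV2` bit set.
fn supports_v2_receipts(features: &NodeFeatures) -> bool {
    features
        .get(FeatureIndex::CandidateReceiptV2 as usize)
        .map(|bit| *bit)
        .unwrap_or(false)
}
```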
}

Expand Down
38 changes: 19 additions & 19 deletions polkadot/runtime/parachains/src/builder.rs
@@ -30,21 +30,30 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
use polkadot_primitives::{
collator_signature_payload, node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate,
CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature,
CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet,
GroupIndex, HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData,
InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext,
UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex,
ValidityAttestation,
node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate, CandidateCommitments,
CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt,
CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData,
Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind,
PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned,
ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation,
};
use sp_core::{sr25519, H256};
use sp_core::{sr25519, ByteArray, H256};
use sp_runtime::{
generic::Digest,
traits::{Header as HeaderT, One, TrailingZeroInput, Zero},
RuntimeAppPublic,
};

/// Create a null collator id.
pub fn dummy_collator() -> CollatorId {
CollatorId::from_slice(&vec![0u8; 32]).expect("32 bytes; qed")
}

/// Create a null collator signature.
pub fn dummy_collator_signature() -> CollatorSignature {
CollatorSignature::from_slice(&vec![0u8; 64]).expect("64 bytes; qed")
}

fn mock_validation_code() -> ValidationCode {
ValidationCode(vec![1, 2, 3])
}
@@ -584,7 +593,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {

// This generates a pair and adds it to the keystore, returning just the
// public.
let collator_public = CollatorId::generate_pair(None);
let header = Self::header(self.block_number);
let relay_parent = header.hash();

@@ -614,14 +622,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {

let pov_hash = Default::default();
let validation_code_hash = mock_validation_code().hash();
let payload = collator_signature_payload(
&relay_parent,
&para_id,
&persisted_validation_data_hash,
&pov_hash,
&validation_code_hash,
);
let signature = collator_public.sign(&payload).unwrap();

let mut past_code_meta =
paras::ParaPastCodeMeta::<BlockNumberFor<T>>::default();
@@ -634,11 +634,11 @@
descriptor: CandidateDescriptor::<T::Hash> {
para_id,
relay_parent,
collator: collator_public,
collator: dummy_collator(),
persisted_validation_data_hash,
pov_hash,
erasure_root: Default::default(),
signature,
signature: dummy_collator_signature(),
para_head: head_data.hash(),
validation_code_hash,
},
8 changes: 0 additions & 8 deletions polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -338,8 +338,6 @@ pub mod pallet {
InsufficientBacking,
/// Invalid (bad signature, unknown validator, etc.) backing.
InvalidBacking,
/// Collator did not sign PoV.
NotCollatorSigned,
/// The validation data hash does not match expected.
ValidationDataHashMismatch,
/// The downward message queue is not processed correctly.
@@ -1222,7 +1220,6 @@ impl<T: Config> CandidateCheckContext<T> {
///
/// Assures:
/// * relay-parent in-bounds
/// * collator signature check passes
/// * code hash of commitments matches current code hash
/// * para head in the descriptor and commitments match
///
@@ -1259,11 +1256,6 @@
);
}

ensure!(
	backed_candidate_receipt.descriptor().check_collator_signature().is_ok(),
	Error::<T>::NotCollatorSigned,
);

Review discussion on this removed check:

@alexggh (Contributor, Jul 23, 2024):
Correlated with this: `if let Err(()) = candidate.check_collator_signature() {`. Isn't it possible we actually stall finality if backers collude to include a candidate with the wrong signature?

Contributor Author:
With this change, the runtime will no longer care about what is being put in the collator signature.

Contributor:
Right, but wouldn't that mean that the block author will accept blocks with an invalid signature, which will then fail candidate-validation because we are still checking the signature there?

Anyway, I thought a bit more about this and it is a non-issue: that will just trigger a dispute which will punish the malicious backers, so this change does not introduce any attack vector.

@sandreim (Contributor Author, Jul 24, 2024):
This is actually a good point. It is a bit subtle, but yes, validators strictly need to keep checking the signatures even if the runtime doesn't. The problem I see with this is that malicious collators can put garbage in the signature, and candidates get backed by validators that were upgraded to not check the signature. During approval checking, these candidates will be disputed by older validators that still check the signature.

Then who gets punished depends on which kind of super-majority we have. This sounds like a new attack vector to me.

Contributor Author:
We'll likely need another node feature 🤦🏼 just to give people more time to upgrade, but at least 2/3; then we need to communicate the deadline after which it becomes dangerous (per the previous comment) not to have upgraded yet.

@sandreim (Contributor Author, Jul 24, 2024):
One way to solve this is to still keep the signature checks on the node only, but only check them if we don't have a core index (signaled by the UMP commitment and zero padding in the descriptor).

Member:
> One way to solve this is to still keep the signature checks on the node only, but only check them if we don't have a core index (signaled by the UMP commitment and zero padding in the descriptor).

I had a similar idea, but I don't see how this fixes anything. You still have the problem of different validators arriving at different validity.

The node feature is actually the way to go. We still need to wait for enough validators to upgrade, but then we have perfect consensus on which strategy to use in an instant. Any too-old validators might get slashed, but we actually have requirements on validators to keep their node current, so that is fine.

So the basic plan is to implement everything at once:

  1. Ignore the signature.
  2. If the signature is zero, re-interpret the data.
  3. Enable all of that with a node feature.

Collators will not be affected. Collators can still provide a signature; it will simply be ignored. If they set it to 0, we re-interpret the fields.

@sandreim (Contributor Author):
Agreed. I'll add the node feature in this PR. It will signal the nodes not to check the signature if there is a core_index.

Member:
Once the feature is enabled, no signature needs to be checked at all anymore. But we should also directly start re-interpreting fields in case the signature is 0. Otherwise we would need two node features: one to disable checking and then another one to check the core index.
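
To make the plan above concrete, here is a rough sketch of the node-side strategy that was discussed, under stated assumptions: the feature-bit lookup mirrors the earlier sketch, `CandidateDescriptor::check_collator_signature` is the existing accessor referenced in the thread, and the zero-signature re-interpretation is only indicated by a comment. This is an illustration, not the code merged in this PR.

```rust
use polkadot_primitives::{node_features::FeatureIndex, CandidateDescriptor, NodeFeatures};

/// Sketch of the agreed node-side strategy (illustrative only).
fn validate_descriptor_signature(
    features: &NodeFeatures,
    descriptor: &CandidateDescriptor,
) -> Result<(), ()> {
    let receipts_v2_enabled = features
        .get(FeatureIndex::CandidateReceiptV2 as usize)
        .map(|bit| *bit)
        .unwrap_or(false);

    if receipts_v2_enabled {
        // Feature enabled: the collator signature is ignored entirely. A zeroed
        // signature would additionally signal that reserved descriptor fields
        // (e.g. the core index) should be re-interpreted.
        return Ok(());
    }

    // Feature not yet enabled: keep the legacy node-side check so that all
    // validators arrive at the same verdict on candidate validity.
    descriptor.check_collator_signature()
}
```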

let validation_code_hash = paras::CurrentCodeHash::<T>::get(para_id)
// A candidate for a parachain without current validation code is not scheduled.
.ok_or_else(|| Error::<T>::UnscheduledCandidate)?;
56 changes: 46 additions & 10 deletions polkadot/runtime/parachains/src/inclusion/tests.rs
@@ -1545,16 +1545,52 @@ fn candidate_checks() {
None,
);

assert_noop!(
ParaInclusion::process_candidates(
&allowed_relay_parents,
&vec![(thread_a_assignment.0, vec![(backed, thread_a_assignment.1)])]
.into_iter()
.collect(),
&group_validators,
false
),
Error::<Test>::NotCollatorSigned
let ProcessedCandidates {
core_indices: occupied_cores,
candidate_receipt_with_backing_validator_indices,
} = ParaInclusion::process_candidates(
&allowed_relay_parents,
&vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])]
.into_iter()
.collect(),
&group_validators,
false,
)
.expect("candidate is accepted with bad collator signature");

assert_eq!(occupied_cores, vec![(CoreIndex::from(2), thread_a)]);

let mut expected = std::collections::HashMap::<
CandidateHash,
(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>),
>::new();
let backed_candidate = backed;
let candidate_receipt_with_backers = expected
.entry(backed_candidate.hash())
.or_insert_with(|| (backed_candidate.receipt(), Vec::new()));
let (validator_indices, _maybe_core_index) =
backed_candidate.validator_indices_and_core_index(true);
assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones());
candidate_receipt_with_backers.1.extend(
validator_indices
.iter()
.enumerate()
.filter(|(_, signed)| **signed)
.zip(backed_candidate.validity_votes().iter().cloned())
.filter_map(|((validator_index_within_group, _), attestation)| {
let grp_idx = GroupIndex(2);
group_validators(grp_idx).map(|validator_indices| {
(validator_indices[validator_index_within_group], attestation)
})
}),
);

assert_eq!(
expected,
candidate_receipt_with_backing_validator_indices
.into_iter()
.map(|c| (c.0.hash(), c))
.collect()
);
}

21 changes: 21 additions & 0 deletions prdoc/pr_4665.prdoc
@@ -0,0 +1,21 @@
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

title: "Remove runtime collator signature checks"

doc:
- audience: [Runtime Dev, Node Dev]
description: |
Removes runtime collator signature checks, but these are still being done on the node. Remove collator
and signature from the `ProspectiveCandidate` definition in the inclusion emulator. Add
`CandidateReceiptV2` node feature bit.

crates:
- name: polkadot-primitives
bump: minor
- name: polkadot-node-subsystem-util
bump: minor
- name: polkadot-node-core-prospective-parachains
bump: patch
- name: polkadot-runtime-parachains
bump: patch