Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

beefy: error logs for validators with dummy keys #3939

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
25 changes: 13 additions & 12 deletions polkadot/node/service/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1233,6 +1233,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
prometheus_registry: prometheus_registry.clone(),
links: beefy_links,
on_demand_justifications_handler: beefy_on_demand_justifications_handler,
is_authority: role.is_authority(),
};

let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params);
Expand All @@ -1242,18 +1243,18 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
task_manager
.spawn_essential_handle()
.spawn_blocking("beefy-gadget", None, gadget);
// When offchain indexing is enabled, MMR gadget should also run.
if is_offchain_indexing_enabled {
task_manager.spawn_essential_handle().spawn_blocking(
"mmr-gadget",
None,
MmrGadget::start(
client.clone(),
backend.clone(),
sp_mmr_primitives::INDEXING_PREFIX.to_vec(),
),
);
}
}
// When offchain indexing is enabled, MMR gadget should also run.
if is_offchain_indexing_enabled {
task_manager.spawn_essential_handle().spawn_blocking(
"mmr-gadget",
None,
MmrGadget::start(
client.clone(),
backend.clone(),
sp_mmr_primitives::INDEXING_PREFIX.to_vec(),
),
);
}

let config = grandpa::Config {
Expand Down
1 change: 1 addition & 0 deletions substrate/bin/node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -652,6 +652,7 @@ pub fn new_full_base(
prometheus_registry: prometheus_registry.clone(),
links: beefy_links,
on_demand_justifications_handler: beefy_on_demand_justifications_handler,
is_authority: role.is_authority(),
};

let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params);
Expand Down
18 changes: 17 additions & 1 deletion substrate/client/consensus/beefy/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,8 @@ pub struct BeefyParams<B: Block, BE, C, N, P, R, S> {
pub links: BeefyVoterLinks<B>,
/// Handler for incoming BEEFY justifications requests from a remote peer.
pub on_demand_justifications_handler: BeefyJustifsRequestHandler<B, C>,
/// Whether running under "Authority" role.
pub is_authority: bool,
}
/// Helper object holding BEEFY worker communication/gossip components.
///
Expand Down Expand Up @@ -270,6 +272,7 @@ where
min_block_delta: u32,
gossip_validator: Arc<GossipValidator<B>>,
finality_notifications: &mut Fuse<FinalityNotifications<B>>,
is_authority: bool,
) -> Result<Self, Error> {
// Wait for BEEFY pallet to be active before starting voter.
let (beefy_genesis, best_grandpa) =
Expand All @@ -283,6 +286,7 @@ where
runtime.clone(),
&key_store,
&metrics,
is_authority,
)
.await?;
// Update the gossip validator with the right starting round and set id.
Expand All @@ -301,6 +305,7 @@ where
comms: BeefyComms<B>,
links: BeefyVoterLinks<B>,
pending_justifications: BTreeMap<NumberFor<B>, BeefyVersionedFinalityProof<B>>,
is_authority: bool,
) -> BeefyWorker<B, BE, P, R, S> {
BeefyWorker {
backend: self.backend,
Expand All @@ -313,6 +318,7 @@ where
comms,
links,
pending_justifications,
is_authority,
}
}

Expand Down Expand Up @@ -423,6 +429,7 @@ where
runtime: Arc<R>,
key_store: &BeefyKeystore<AuthorityId>,
metrics: &Option<VoterMetrics>,
is_authority: bool,
) -> Result<PersistedState<B>, Error> {
// Initialize voter state from AUX DB if compatible.
if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())?
Expand Down Expand Up @@ -455,7 +462,13 @@ where
"🥩 Handling missed BEEFY session after node restart: {:?}.",
new_session_start
);
state.init_session_at(new_session_start, validator_set, key_store, metrics);
state.init_session_at(
new_session_start,
validator_set,
key_store,
metrics,
is_authority,
);
}
return Ok(state)
}
Expand Down Expand Up @@ -491,6 +504,7 @@ pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
prometheus_registry,
links,
mut on_demand_justifications_handler,
is_authority,
} = beefy_params;

let BeefyNetworkParams {
Expand Down Expand Up @@ -553,6 +567,7 @@ pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
min_block_delta,
beefy_comms.gossip_validator.clone(),
&mut finality_notifications,
is_authority,
).fuse() => {
match builder_init_result {
Ok(builder) => break builder,
Expand Down Expand Up @@ -580,6 +595,7 @@ pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
beefy_comms,
links.clone(),
BTreeMap::new(),
is_authority,
);

match futures::future::select(
Expand Down
2 changes: 2 additions & 0 deletions substrate/client/consensus/beefy/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,7 @@ async fn voter_init_setup(
Arc::new(api.clone()),
&key_store,
&metrics,
true,
)
.await
}
Expand Down Expand Up @@ -438,6 +439,7 @@ where
min_block_delta,
prometheus_registry: None,
on_demand_justifications_handler: on_demand_justif_handler,
is_authority: true,
};
let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params);

Expand Down
77 changes: 17 additions & 60 deletions substrate/client/consensus/beefy/src/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ use crate::{
};
use codec::{Codec, Decode, DecodeAll, Encode};
use futures::{stream::Fuse, FutureExt, StreamExt};
use log::{debug, error, info, log_enabled, trace, warn};
use log::{debug, error, info, trace, warn};
use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend};
use sc_utils::notification::NotificationReceiver;
use sp_api::ProvideRuntimeApi;
Expand All @@ -51,7 +51,7 @@ use sp_runtime::{
SaturatedConversion,
};
use std::{
collections::{BTreeMap, BTreeSet, VecDeque},
collections::{BTreeMap, VecDeque},
fmt::Debug,
sync::Arc,
};
Expand Down Expand Up @@ -332,6 +332,7 @@ impl<B: Block> PersistedState<B> {
validator_set: ValidatorSet<AuthorityId>,
key_store: &BeefyKeystore<AuthorityId>,
metrics: &Option<VoterMetrics>,
is_authority: bool,
) {
debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set);

Expand All @@ -348,11 +349,16 @@ impl<B: Block> PersistedState<B> {
}
}

if log_enabled!(target: LOG_TARGET, log::Level::Debug) {
// verify the new validator set - only do it if we're also logging the warning
if verify_validator_set::<B>(&new_session_start, &validator_set, key_store).is_err() {
metric_inc!(metrics, beefy_no_authority_found_in_store);
}
// verify we have some BEEFY key available in keystore when role is authority.
if is_authority && key_store.public_keys().map_or(false, |k| k.is_empty()) {
error!(
target: LOG_TARGET,
"🥩 for session starting at block {:?} no BEEFY authority key found in store, \
you must generate valid session keys \
(https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#generating-the-session-keys)",
new_session_start,
);
metric_inc!(metrics, beefy_no_authority_found_in_store);
}

let id = validator_set.id();
Expand Down Expand Up @@ -390,6 +396,8 @@ pub(crate) struct BeefyWorker<B: Block, BE, P, RuntimeApi, S> {
pub persisted_state: PersistedState<B>,
/// BEEFY voter metrics
pub metrics: Option<VoterMetrics>,
/// Node runs under "Authority" role.
pub is_authority: bool,
}

impl<B, BE, P, R, S> BeefyWorker<B, BE, P, R, S>
Expand Down Expand Up @@ -425,6 +433,7 @@ where
validator_set,
&self.key_store,
&self.metrics,
self.is_authority,
);
}

Expand Down Expand Up @@ -1040,33 +1049,6 @@ where
}
}

/// Verify `active` validator set for `block` against the key store
///
/// We want to make sure that we have _at least one_ key in our keystore that
/// is part of the validator set, that's because if there are no local keys
/// then we can't perform our job as a validator.
///
/// Note that for a non-authority node there will be no keystore, and we will
/// return an error and don't check. The error can usually be ignored.
fn verify_validator_set<B: Block>(
block: &NumberFor<B>,
active: &ValidatorSet<AuthorityId>,
key_store: &BeefyKeystore<AuthorityId>,
) -> Result<(), Error> {
let active: BTreeSet<&AuthorityId> = active.validators().iter().collect();

let public_keys = key_store.public_keys()?;
let store: BTreeSet<&AuthorityId> = public_keys.iter().collect();

if store.intersection(&active).count() == 0 {
let msg = "no authority public key found in store".to_string();
debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg);
Err(Error::Keystore(msg))
} else {
Ok(())
}
}

#[cfg(test)]
pub(crate) mod tests {
use super::*;
Expand Down Expand Up @@ -1208,6 +1190,7 @@ pub(crate) mod tests {
comms,
pending_justifications: BTreeMap::new(),
persisted_state,
is_authority: true,
}
}

Expand Down Expand Up @@ -1471,32 +1454,6 @@ pub(crate) mod tests {
assert_eq!(extracted, Some(validator_set));
}

// Exercises `verify_validator_set` against a worker's keystore in three cases:
// a matching key, a non-matching key, and a missing keystore.
#[tokio::test]
async fn keystore_vs_validator_set() {
let keys = &[Keyring::Alice];
let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
let mut net = BeefyTestNet::new(1);
let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone());

// keystore doesn't contain other keys than validators'
assert_eq!(verify_validator_set::<Block>(&1, &validator_set, &worker.key_store), Ok(()));

// unknown `Bob` key: active set has no overlap with the local keystore
let keys = &[Keyring::Bob];
let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
let err_msg = "no authority public key found in store".to_string();
let expected = Err(Error::Keystore(err_msg));
assert_eq!(verify_validator_set::<Block>(&1, &validator_set, &worker.key_store), expected);

// worker has no keystore: lookup itself fails with a keystore error
worker.key_store = None.into();
let expected_err = Err(Error::Keystore("no Keystore".into()));
assert_eq!(
verify_validator_set::<Block>(&1, &validator_set, &worker.key_store),
expected_err
);
}

#[tokio::test]
async fn should_finalize_correctly() {
let keys = [Keyring::Alice];
Expand Down
Loading