chore(lints): Use expect over allow #4402

Merged · 10 commits · Dec 16, 2024
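For reviewers unfamiliar with the attribute: `#[allow(lint)]` suppresses a lint unconditionally, while `#[expect(lint)]` (stable since Rust 1.81) suppresses it *and* raises `unfulfilled_lint_expectations` if the expected lint no longer fires, so stale suppressions flag themselves. A minimal sketch of the difference, not taken from this diff:

```rust
// Minimal sketch of allow vs. expect behaviour; not code from this PR.

// A stale `allow`: the function is actually used, so `dead_code` never
// fires, but the compiler stays silent and the attribute lingers.
#[allow(dead_code)]
fn used_helper() -> u32 {
    42
}

// A fulfilled `expect`: the function really is unused, so the expectation
// holds and nothing is reported. If someone later starts calling it, the
// compiler emits an `unfulfilled_lint_expectations` warning pointing at
// the attribute that should now be removed.
#[expect(dead_code)]
fn unused_helper() -> u32 {
    7
}

fn main() {
    println!("{}", used_helper());
}
```

That self-reporting property is the point of the blanket replacement, and it appears to be why several `#[allow(unused)]` attributes below become `#[cfg(test)]` instead: test-only items are simply compiled out of production builds rather than carrying a lint expectation.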
6 changes: 4 additions & 2 deletions consensus/core/src/authority_node.rs
@@ -36,8 +36,8 @@ use crate::{
/// ConsensusAuthority is used by Iota to manage the lifetime of AuthorityNode.
/// It hides the details of the implementation from the caller,
/// MysticetiManager.
#[allow(private_interfaces)]
#[expect(private_interfaces)]
pub enum ConsensusAuthority {
WithTonic(AuthorityNode<TonicManager>),
}

@@ -100,7 +100,7 @@ impl ConsensusAuthority {
}
}

#[allow(unused)]
#[cfg(test)]
fn sync_last_known_own_block_enabled(&self) -> bool {
match self {
Self::WithTonic(authority) => authority.sync_last_known_own_block,
@@ -124,6 +124,7 @@ where
broadcaster: Option<Broadcaster>,
subscriber: Option<Subscriber<N::Client, AuthorityService<ChannelCoreThreadDispatcher>>>,
network_manager: N,
#[cfg(test)]
sync_last_known_own_block: bool,
}

@@ -306,6 +307,7 @@ where
broadcaster,
subscriber,
network_manager,
#[cfg(test)]
sync_last_known_own_block,
}
}
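Note that gating the `sync_last_known_own_block` field behind `#[cfg(test)]` also requires gating its initializer, which is what the second and third hunks above do. A minimal sketch of that pattern with illustrative names (not the crate's types):

```rust
// Sketch of a test-only struct field; `Node` and `debug_flag` are
// illustrative names, not items from this PR.
struct Node {
    id: u64,
    // Compiled only when building tests...
    #[cfg(test)]
    debug_flag: bool,
}

impl Node {
    fn new(id: u64) -> Self {
        Node {
            id,
            // ...so the construction site needs the same gate, otherwise
            // non-test builds would reference a field that does not exist.
            #[cfg(test)]
            debug_flag: false,
        }
    }
}

fn main() {
    let node = Node::new(7);
    println!("node {}", node.id);
}
```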
12 changes: 6 additions & 6 deletions consensus/core/src/base_committer.rs
@@ -427,29 +427,29 @@ mod base_committer_builder {
}
}

#[allow(unused)]
#[expect(unused)]
pub(crate) fn with_wave_length(mut self, wave_length: u32) -> Self {
self.wave_length = wave_length;
self
}

#[allow(unused)]
#[expect(unused)]
pub(crate) fn with_leader_offset(mut self, leader_offset: u32) -> Self {
self.leader_offset = leader_offset;
self
}

#[allow(unused)]
#[expect(unused)]
pub(crate) fn with_round_offset(mut self, round_offset: u32) -> Self {
self.round_offset = round_offset;
self
}

pub(crate) fn build(self) -> BaseCommitter {
let options = BaseCommitterOptions {
wave_length: DEFAULT_WAVE_LENGTH,
leader_offset: 0,
round_offset: 0,
wave_length: self.wave_length,
leader_offset: self.leader_offset,
round_offset: self.round_offset,
};
BaseCommitter::new(
self.context.clone(),
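Beyond the lint attribute swap, this hunk fixes `build()` to use the values stored by the `with_*` setters instead of always falling back to the defaults. A simplified sketch of the corrected builder; `Options` and `Builder` stand in for `BaseCommitterOptions` and its builder:

```rust
// Simplified sketch of the corrected builder pattern.
const DEFAULT_WAVE_LENGTH: u32 = 3;

#[derive(Debug)]
struct Options {
    wave_length: u32,
    leader_offset: u32,
    round_offset: u32,
}

struct Builder {
    wave_length: u32,
    leader_offset: u32,
    round_offset: u32,
}

impl Builder {
    fn new() -> Self {
        Self {
            wave_length: DEFAULT_WAVE_LENGTH,
            leader_offset: 0,
            round_offset: 0,
        }
    }

    fn with_wave_length(mut self, wave_length: u32) -> Self {
        self.wave_length = wave_length;
        self
    }

    // Previously this returned hard-coded defaults, silently discarding
    // whatever the with_* setters had stored; now it forwards the
    // builder's own fields.
    fn build(self) -> Options {
        Options {
            wave_length: self.wave_length,
            leader_offset: self.leader_offset,
            round_offset: self.round_offset,
        }
    }
}

fn main() {
    let options = Builder::new().with_wave_length(5).build();
    assert_eq!(options.wave_length, 5); // was always 3 before the fix
    println!("{options:?}");
}
```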
3 changes: 2 additions & 1 deletion consensus/core/src/block_verifier.rs
@@ -206,9 +206,10 @@ impl BlockVerifier for SignedBlockVerifier {
}
}

#[allow(unused)]
#[cfg(test)]
pub(crate) struct NoopBlockVerifier;

#[cfg(test)]
impl BlockVerifier for NoopBlockVerifier {
fn verify(&self, _block: &SignedBlock) -> ConsensusResult<()> {
Ok(())
1 change: 0 additions & 1 deletion consensus/core/src/commit.rs
@@ -91,7 +91,6 @@ impl Commit {
pub(crate) trait CommitAPI {
fn round(&self) -> Round;
fn index(&self) -> CommitIndex;
#[allow(dead_code)]
fn previous_digest(&self) -> CommitDigest;
fn timestamp_ms(&self) -> BlockTimestampMs;
fn leader(&self) -> BlockRef;
2 changes: 1 addition & 1 deletion consensus/core/src/core.rs
@@ -849,7 +849,7 @@ pub(crate) struct CoreTextFixture {
pub core: Core,
pub signal_receivers: CoreSignalsReceivers,
pub block_receiver: broadcast::Receiver<VerifiedBlock>,
#[allow(unused)]
#[expect(unused)]
pub commit_receiver: UnboundedReceiver<CommittedSubDag>,
pub store: Arc<MemStore>,
}
2 changes: 1 addition & 1 deletion consensus/core/src/leader_schedule.rs
@@ -298,7 +298,7 @@ impl LeaderSwapTable {
context: Arc<Context>,
// Ignore linter warning in simtests.
// TODO: maybe override protocol configs in tests for swap_stake_threshold, and call new().
#[allow(unused_variables)] swap_stake_threshold: u64,
#[cfg_attr(msim, expect(unused_variables))] swap_stake_threshold: u64,
commit_index: CommitIndex,
reputation_scores: ReputationScores,
) -> Self {
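The `cfg_attr` wrapper above attaches the lint expectation only when the `msim` cfg is active, so regular builds (where the parameter is used) never carry an unfulfilled expectation. A self-contained sketch of the same mechanism, using the built-in `test` cfg in place of the project-specific `msim` flag:

```rust
// Sketch of a conditionally applied lint expectation; the standard `test`
// cfg stands in for the project-specific `msim` cfg from the PR.
#[cfg_attr(test, expect(unused_variables))]
fn effective_threshold(swap_stake_threshold: u64) -> u64 {
    // In test builds a fixed threshold is returned and the parameter goes
    // unused, which is exactly what the expectation accounts for.
    #[cfg(test)]
    return 42;

    // In regular builds the parameter is used, the cfg_attr adds nothing,
    // and no expectation can go stale.
    #[cfg(not(test))]
    return swap_stake_threshold / 2;
}

fn main() {
    println!("{}", effective_threshold(100));
}
```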
2 changes: 1 addition & 1 deletion consensus/core/src/leader_scoring_strategy.rs
@@ -11,13 +11,13 @@ use crate::{
stake_aggregator::{QuorumThreshold, StakeAggregator},
};

#[allow(unused)]
pub(crate) trait ScoringStrategy: Send + Sync {
fn calculate_scores_for_leader(&self, subdag: &UnscoredSubdag, leader_slot: Slot) -> Vec<u64>;

// Based on the scoring strategy there is a minimum number of rounds required
// for the scores to be calculated. This method allows that to be set by the
// scoring strategy.
#[expect(unused)]
fn leader_scoring_round_range(&self, min_round: u32, max_round: u32) -> Range<u32>;
}

2 changes: 1 addition & 1 deletion consensus/core/src/network/metrics_layer.rs
@@ -79,7 +79,7 @@ impl MetricsCallbackMaker {
pub(crate) struct MetricsResponseCallback {
metrics: Arc<NetworkRouteMetrics>,
// The timer is held on to and "observed" once dropped
#[allow(unused)]
#[expect(unused)]
timer: HistogramTimer,
route: String,
excessive_message_size: usize,
3 changes: 2 additions & 1 deletion consensus/core/src/stake_aggregator.rs
@@ -12,7 +12,7 @@ pub(crate) trait CommitteeThreshold {

pub(crate) struct QuorumThreshold;

#[allow(unused)]
#[cfg(test)]
pub(crate) struct ValidityThreshold;

impl CommitteeThreshold for QuorumThreshold {
@@ -21,6 +21,7 @@ impl CommitteeThreshold for QuorumThreshold {
}
}

#[cfg(test)]
impl CommitteeThreshold for ValidityThreshold {
fn is_threshold(committee: &Committee, amount: Stake) -> bool {
committee.reached_validity(amount)
3 changes: 0 additions & 3 deletions consensus/core/src/storage/mem_store.rs
@@ -21,12 +21,10 @@ use crate::{
};

/// In-memory storage for testing.
#[allow(unused)]
pub(crate) struct MemStore {
inner: RwLock<Inner>,
}

#[allow(unused)]
struct Inner {
blocks: BTreeMap<(Round, AuthorityIndex, BlockDigest), VerifiedBlock>,
digests_by_authorities: BTreeSet<(AuthorityIndex, Round, BlockDigest)>,
@@ -36,7 +34,6 @@ struct Inner {
}

impl MemStore {
#[cfg(test)]
pub(crate) fn new() -> Self {
MemStore {
inner: RwLock::new(Inner {
6 changes: 3 additions & 3 deletions consensus/core/src/storage/mod.rs
@@ -2,6 +2,7 @@
// Modifications Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

#[cfg(test)]
pub(crate) mod mem_store;
pub(crate) mod rocksdb_store;

@@ -12,13 +13,12 @@ use consensus_config::AuthorityIndex;

use crate::{
CommitIndex,
block::{BlockRef, Round, Slot, VerifiedBlock},
block::{BlockRef, Round, VerifiedBlock},
commit::{CommitInfo, CommitRange, CommitRef, TrustedCommit},
error::ConsensusResult,
};

/// A common interface for consensus storage.
#[allow(unused)]
pub(crate) trait Store: Send + Sync {
/// Writes blocks, consensus commits and other data to store atomically.
fn write(&self, write_batch: WriteBatch) -> ConsensusResult<()>;
@@ -31,7 +31,7 @@ pub(crate) trait Store: Send + Sync {

/// Checks whether there is any block at the given slot
#[allow(dead_code)]
fn contains_block_at_slot(&self, slot: Slot) -> ConsensusResult<bool>;
fn contains_block_at_slot(&self, slot: crate::block::Slot) -> ConsensusResult<bool>;

/// Reads blocks for an authority, from start_round.
fn scan_blocks_by_author(
8 changes: 4 additions & 4 deletions consensus/core/src/storage/rocksdb_store.rs
@@ -2,7 +2,7 @@
// Modifications Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

use std::{collections::VecDeque, ops::Bound::Included, time::Duration};
use std::{ops::Bound::Included, time::Duration};

use bytes::Bytes;
use consensus_config::AuthorityIndex;
@@ -16,7 +16,7 @@ use typed_store::{

use super::{CommitInfo, Store, WriteBatch};
use crate::{
block::{BlockAPI as _, BlockDigest, BlockRef, Round, SignedBlock, Slot, VerifiedBlock},
block::{BlockAPI as _, BlockDigest, BlockRef, Round, SignedBlock, VerifiedBlock},
commit::{CommitAPI as _, CommitDigest, CommitIndex, CommitRange, CommitRef, TrustedCommit},
error::{ConsensusError, ConsensusResult},
};
@@ -176,7 +176,7 @@ impl Store for RocksDBStore {
Ok(exist)
}

fn contains_block_at_slot(&self, slot: Slot) -> ConsensusResult<bool> {
fn contains_block_at_slot(&self, slot: crate::block::Slot) -> ConsensusResult<bool> {
let found = self
.digests_by_authorities
.safe_range_iter((
@@ -222,7 +222,7 @@ impl Store for RocksDBStore {
before_round: Option<Round>,
) -> ConsensusResult<Vec<VerifiedBlock>> {
let before_round = before_round.unwrap_or(Round::MAX);
let mut refs = VecDeque::new();
let mut refs = std::collections::VecDeque::new();
for kv in self
.digests_by_authorities
.safe_range_iter((
13 changes: 4 additions & 9 deletions consensus/core/src/synchronizer.rs
@@ -975,21 +975,16 @@ impl<C: NetworkClient, V: BlockVerifier, D: CoreThreadDispatcher> Synchronizer<C
.take(MAX_PEERS * context.parameters.max_blocks_per_fetch)
.collect::<Vec<_>>();

#[allow(unused_mut)]
#[cfg_attr(test, expect(unused_mut))]
let mut peers = context
.committee
.authorities()
.filter_map(|(peer_index, _)| (peer_index != context.own_index).then_some(peer_index))
.collect::<Vec<_>>();

// TODO: probably inject the RNG to allow unit testing - this is a work around
// for now.
cfg_if::cfg_if! {
if #[cfg(not(test))] {
// Shuffle the peers
peers.shuffle(&mut ThreadRng::default());
}
}
// In test, the order is not randomized
#[cfg(not(test))]
peers.shuffle(&mut ThreadRng::default());

let mut peers = peers.into_iter();
let mut request_futures = FuturesUnordered::new();
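The replacement above drops the `cfg_if!` block in favour of a plain `#[cfg(not(test))]` attribute on the shuffle statement, paired with `#[cfg_attr(test, expect(unused_mut))]` since the binding is only mutated outside tests. A self-contained sketch of the combined pattern, assuming the `rand` 0.8 crate for `shuffle`/`thread_rng` and simplifying peer indices to `usize`:

```rust
// Sketch of cfg-gated shuffling with a matching lint expectation.
// Assumes rand = "0.8" as a dependency.
#[cfg(not(test))]
use rand::seq::SliceRandom;

fn candidate_peers(own_index: usize, committee_size: usize) -> Vec<usize> {
    // `mut` is only needed for the shuffle below, so in test builds the
    // unused_mut lint fires and the expectation absorbs it.
    #[cfg_attr(test, expect(unused_mut))]
    let mut peers = (0..committee_size)
        .filter(|&peer| peer != own_index)
        .collect::<Vec<_>>();

    // Randomize the fan-out order in production; keep it deterministic in
    // tests so requests hit peers in a predictable sequence.
    #[cfg(not(test))]
    peers.shuffle(&mut rand::thread_rng());

    peers
}

fn main() {
    println!("{:?}", candidate_peers(0, 4));
}
```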
21 changes: 10 additions & 11 deletions consensus/core/src/test_dag_builder.rs
@@ -76,7 +76,6 @@ use crate::{
/// dag_builder.layer(1).build();
/// dag_builder.print(); // pretty print the entire DAG
/// ```
#[allow(unused)]
pub(crate) struct DagBuilder {
pub(crate) context: Arc<Context>,
pub(crate) leader_schedule: LeaderSchedule,
Expand All @@ -93,7 +92,6 @@ pub(crate) struct DagBuilder {
pipeline: bool,
}

#[allow(unused)]
impl DagBuilder {
pub(crate) fn new(context: Arc<Context>) -> Self {
let leader_schedule = LeaderSchedule::new(context.clone(), LeaderSwapTable::default());
@@ -206,25 +204,26 @@ impl DagBuilder {
!self.blocks.is_empty(),
"No blocks have been created, please make sure that you have called build method"
);
self.blocks
.iter()
.find(|(block_ref, block)| {
block_ref.round == round
&& block_ref.author == self.leader_schedule.elect_leader(round, 0)
})
.map(|(_block_ref, block)| block.clone())
self.blocks.iter().find_map(|(block_ref, block)| {
(block_ref.round == round
&& block_ref.author == self.leader_schedule.elect_leader(round, 0))
.then_some(block.clone())
})
}

#[expect(unused)]
pub(crate) fn with_wave_length(mut self, wave_length: Round) -> Self {
self.wave_length = wave_length;
self
}

#[expect(unused)]
pub(crate) fn with_number_of_leaders(mut self, number_of_leaders: u32) -> Self {
self.number_of_leaders = number_of_leaders;
self
}

#[expect(unused)]
pub(crate) fn with_pipeline(mut self, pipeline: bool) -> Self {
self.pipeline = pipeline;
self
@@ -290,7 +289,7 @@ impl DagBuilder {
/// Gets all uncommitted blocks in a slot.
pub(crate) fn get_uncommitted_blocks_at_slot(&self, slot: Slot) -> Vec<VerifiedBlock> {
let mut blocks = vec![];
for (block_ref, block) in self.blocks.range((
for (_block_ref, block) in self.blocks.range((
Included(BlockRef::new(slot.round, slot.authority, BlockDigest::MIN)),
Included(BlockRef::new(slot.round, slot.authority, BlockDigest::MAX)),
)) {
@@ -366,7 +365,7 @@ pub struct LayerBuilder<'a> {
blocks: Vec<VerifiedBlock>,
}

#[allow(unused)]
#[expect(unused)]
impl<'a> LayerBuilder<'a> {
fn new(dag_builder: &'a mut DagBuilder, start_round: Round) -> Self {
assert!(start_round > 0, "genesis round is created by default");
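The block-lookup change earlier in this file's diff is the other notable refactor here: a `find(..)` followed by `map(..)` becomes a single `find_map` using `bool::then_some`. A standalone sketch of the same transformation over a plain `BTreeMap` (the key and value types are placeholders, not the crate's `BlockRef`/`VerifiedBlock`):

```rust
use std::collections::BTreeMap;

// Placeholder key/value types standing in for BlockRef/VerifiedBlock.
type Round = u32;

fn find_leader_block(
    blocks: &BTreeMap<(Round, &'static str), String>,
    round: Round,
) -> Option<String> {
    // Before: locate the entry, then map the pair down to the cloned value.
    //
    // blocks.iter()
    //     .find(|((r, author), _block)| *r == round && *author == "leader")
    //     .map(|(_key, block)| block.clone())

    // After: one pass with find_map + bool::then_some.
    blocks.iter().find_map(|((r, author), block)| {
        (*r == round && *author == "leader").then_some(block.clone())
    })
}

fn main() {
    let mut blocks = BTreeMap::new();
    blocks.insert((1, "leader"), "block A".to_string());
    blocks.insert((1, "follower"), "block B".to_string());
    assert_eq!(find_leader_block(&blocks, 1), Some("block A".to_string()));
}
```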
3 changes: 2 additions & 1 deletion consensus/core/src/transaction.rs
@@ -240,9 +240,10 @@ pub enum ValidationError {
}

/// `NoopTransactionVerifier` accepts all transactions.
#[allow(unused)]
#[cfg(test)]
pub(crate) struct NoopTransactionVerifier;

#[cfg(test)]
impl TransactionVerifier for NoopTransactionVerifier {
fn verify_batch(&self, _batch: &[&[u8]]) -> Result<(), ValidationError> {
Ok(())
2 changes: 1 addition & 1 deletion consensus/core/src/universal_committer.rs
@@ -182,7 +182,7 @@ pub(crate) mod universal_committer_builder {
}
}

#[allow(unused)]
#[expect(unused)]
pub(crate) fn with_wave_length(mut self, wave_length: Round) -> Self {
self.wave_length = wave_length;
self
4 changes: 2 additions & 2 deletions crates/iota-analytics-indexer/src/analytics_processor.rs
@@ -41,9 +41,9 @@ pub struct AnalyticsProcessor<S: Serialize + ParquetSchema> {
metrics: AnalyticsMetrics,
config: AnalyticsIndexerConfig,
sender: mpsc::Sender<FileMetadata>,
#[allow(dead_code)]
#[expect(dead_code)]
kill_sender: oneshot::Sender<()>,
#[allow(dead_code)]
#[expect(dead_code)]
max_checkpoint_sender: oneshot::Sender<()>,
}

2 changes: 1 addition & 1 deletion crates/iota-archival/src/lib.rs
@@ -52,7 +52,7 @@ use tracing::{error, info};

use crate::reader::{ArchiveReader, ArchiveReaderMetrics};

#[allow(rustdoc::invalid_html_tags)]
#[expect(rustdoc::invalid_html_tags)]
/// Checkpoints and summaries are persisted as blob files. Files are committed
/// to local store by duration or file size. Committed files are synced with the
/// remote store continuously. Files are optionally compressed with the zstd
1 change: 0 additions & 1 deletion crates/iota-aws-orchestrator/src/benchmark.rs
@@ -103,7 +103,6 @@ pub enum LoadType {

/// Search for the breaking point of the L-graph.
// TODO: Doesn't work very well, use tps regression as additional signal.
#[allow(dead_code)]
Search {
/// The initial load to test (and use a baseline).
starting_load: usize,
3 changes: 1 addition & 2 deletions crates/iota-aws-orchestrator/src/monitor.rs
@@ -220,15 +220,14 @@ impl Grafana {
}
}

#[allow(dead_code)]
/// Bootstrap the grafana with datasource to connect to the given instances.
/// NOTE: Only for macOS. Grafana must be installed through homebrew (and not
/// from source). Deeper grafana configuration can be done through the
/// grafana.ini file (/opt/homebrew/etc/grafana/grafana.ini) or the plist file
/// (~/Library/LaunchAgents/homebrew.mxcl.grafana.plist).
pub struct LocalGrafana;

#[allow(dead_code)]
#[expect(dead_code)]
impl LocalGrafana {
/// The default grafana home directory (macOS, homebrew install).
const DEFAULT_GRAFANA_HOME: &'static str = "/opt/homebrew/opt/grafana/share/grafana/";