This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Bump lru from 0.7.8 to 0.8.0 #6060

Merged (6 commits) on Oct 4, 2022
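
The recurring change in this bump: starting with lru 0.8, `LruCache::new` takes a `NonZeroUsize` capacity instead of a plain `usize`, so every construction site below gains a `NonZeroUsize` conversion (a `const` match for constants, or `NonZeroUsize::new(..).unwrap()`/`.expect(..)` for runtime values). A minimal standalone sketch of the new call shape, using illustrative keys and values rather than anything from this repository:

use std::num::NonZeroUsize;
use lru::LruCache; // lru = "0.8"

fn main() {
    // lru 0.7 accepted `LruCache::new(2)`; lru 0.8 wants a capacity proven non-zero.
    let capacity = NonZeroUsize::new(2).expect("2 is non-zero");
    let mut cache: LruCache<u32, &str> = LruCache::new(capacity);

    cache.put(1, "one");
    cache.put(2, "two");
    cache.put(3, "three"); // evicts the least recently used entry, key 1

    assert_eq!(cache.get(&1), None);
    assert_eq!(cache.get(&3), Some(&"three"));
}
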
18 changes: 9 additions & 9 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion node/core/approval-voting/Cargo.toml
@@ -10,7 +10,7 @@ futures-timer = "3.0.2"
 parity-scale-codec = { version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] }
 gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
-lru = "0.7"
+lru = "0.8"
 merlin = "2.0"
 schnorrkel = "0.9.1"
 kvdb = "0.11.0"
7 changes: 6 additions & 1 deletion node/core/approval-voting/src/lib.rs
@@ -70,6 +70,7 @@ use std::{
     collections::{
         btree_map::Entry as BTMEntry, hash_map::Entry as HMEntry, BTreeMap, HashMap, HashSet,
     },
+    num::NonZeroUsize,
     sync::Arc,
     time::Duration,
 };
@@ -104,7 +105,11 @@ const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120);
 /// Value rather arbitrarily: Should not be hit in practice, it exists to more easily diagnose dead
 /// lock issues for example.
 const WAIT_FOR_SIGS_TIMEOUT: Duration = Duration::from_millis(500);
-const APPROVAL_CACHE_SIZE: usize = 1024;
+const APPROVAL_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(1024) {
+    Some(cap) => cap,
+    None => panic!("Approval cache size must be non-zero."),
+};
+
 const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds.
 const APPROVAL_DELAY: Tick = 2;
 const LOG_TARGET: &str = "parachain::approval-voting";
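
The `match`/`panic!` construction above (repeated for the other constants in this diff) runs during constant evaluation, so a zero capacity becomes a compile-time error rather than a runtime panic; `panic!` with a plain string literal has been allowed in const contexts since Rust 1.57. A self-contained sketch of the same pattern, with an illustrative constant name:

use std::num::NonZeroUsize;

// Evaluated at compile time: if the literal were 0, `NonZeroUsize::new` would
// return `None` and the `panic!` arm would abort compilation.
const EXAMPLE_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(1024) {
    Some(cap) => cap,
    None => panic!("cache size must be non-zero"),
};

// Compile-time sanity check; `NonZeroUsize::get` is a const fn.
const _: () = assert!(EXAMPLE_CACHE_SIZE.get() == 1024);

fn main() {
    println!("capacity = {}", EXAMPLE_CACHE_SIZE);
}
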
2 changes: 1 addition & 1 deletion node/core/dispute-coordinator/Cargo.toml
@@ -10,7 +10,7 @@ gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.1.5"
 kvdb = "0.11.0"
 thiserror = "1.0.31"
-lru = "0.7.7"
+lru = "0.8.0"
 fatality = "0.0.6"
 
 polkadot-primitives = { path = "../../../primitives" }
10 changes: 8 additions & 2 deletions node/core/dispute-coordinator/src/scraping/mod.rs
@@ -14,7 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::{BTreeMap, HashSet};
+use std::{
+    collections::{BTreeMap, HashSet},
+    num::NonZeroUsize,
+};
 
 use futures::channel::oneshot;
 use lru::LruCache;
@@ -44,7 +47,10 @@ mod tests;
 /// `last_observed_blocks` LRU. This means, this value should the very least be as large as the
 /// number of expected forks for keeping chain scraping efficient. Making the LRU much larger than
 /// that has very limited use.
-const LRU_OBSERVED_BLOCKS_CAPACITY: usize = 20;
+const LRU_OBSERVED_BLOCKS_CAPACITY: NonZeroUsize = match NonZeroUsize::new(20) {
+    Some(cap) => cap,
+    None => panic!("Observed blocks cache size must be non-zero"),
+};
 
 /// Chain scraper
 ///
2 changes: 1 addition & 1 deletion node/network/availability-distribution/Cargo.toml
@@ -19,7 +19,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste
 thiserror = "1.0.31"
 rand = "0.8.5"
 derive_more = "0.99.17"
-lru = "0.7.7"
+lru = "0.8.0"
 fatality = "0.0.6"
 
 [dev-dependencies]
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::HashSet;
+use std::{collections::HashSet, num::NonZeroUsize};
 
 use lru::LruCache;
 use rand::{seq::SliceRandom, thread_rng};
@@ -85,7 +85,7 @@ impl SessionCache {
     pub fn new() -> Self {
         SessionCache {
             // We need to cache the current and the last session the most:
-            session_info_cache: LruCache::new(2),
+            session_info_cache: LruCache::new(NonZeroUsize::new(2).unwrap()),
         }
     }
 
2 changes: 1 addition & 1 deletion node/network/availability-recovery/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-lru = "0.7.7"
+lru = "0.8.0"
 rand = "0.8.5"
 fatality = "0.0.6"
 thiserror = "1.0.31"
6 changes: 5 additions & 1 deletion node/network/availability-recovery/src/lib.rs
@@ -20,6 +20,7 @@
 
 use std::{
     collections::{HashMap, VecDeque},
+    num::NonZeroUsize,
     pin::Pin,
     time::Duration,
 };
@@ -77,7 +78,10 @@ const LOG_TARGET: &str = "parachain::availability-recovery";
 const N_PARALLEL: usize = 50;
 
 // Size of the LRU cache where we keep recovered data.
-const LRU_SIZE: usize = 16;
+const LRU_SIZE: NonZeroUsize = match NonZeroUsize::new(16) {
+    Some(cap) => cap,
+    None => panic!("Availability-recovery cache size must be non-zero."),
+};
 
 const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request");
 
2 changes: 1 addition & 1 deletion node/network/dispute-distribution/Cargo.toml
@@ -20,7 +20,7 @@ sp-application-crypto = { git = "https://github.com/paritytech/substrate", branc
 sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
 thiserror = "1.0.31"
 fatality = "0.0.6"
-lru = "0.7.7"
+lru = "0.8.0"
 
 [dev-dependencies]
 async-trait = "0.1.57"
5 changes: 4 additions & 1 deletion node/network/dispute-distribution/src/lib.rs
@@ -24,6 +24,8 @@
 //! The sender is responsible for getting our vote out, see [`sender`]. The receiver handles
 //! incoming [`DisputeRequest`]s and offers spam protection, see [`receiver`].
 
+use std::num::NonZeroUsize;
+
 use futures::{channel::mpsc, FutureExt, StreamExt, TryFutureExt};
 
 use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery;
@@ -145,7 +147,8 @@ where
     ) -> Self {
         let runtime = RuntimeInfo::new_with_config(runtime::Config {
             keystore: Some(keystore),
-            session_cache_lru_size: DISPUTE_WINDOW.get() as usize,
+            session_cache_lru_size: NonZeroUsize::new(DISPUTE_WINDOW.get() as usize)
+                .expect("Dispute window can not be 0; qed"),
         });
         let (tx, sender_rx) = mpsc::channel(1);
         let disputes_sender = DisputeSender::new(tx, metrics.clone());
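
`DISPUTE_WINDOW.get()` is a runtime value rather than a literal, so the conversion above goes through `NonZeroUsize::new(..).expect(..)` instead of a `const` match. A hypothetical standalone version of that conversion, assuming the dispute window is carried as a `NonZeroU32` (its concrete type is not visible in this diff):

use std::num::{NonZeroU32, NonZeroUsize};

// Hypothetical helper mirroring the call site above: widen a non-zero u32
// window into the NonZeroUsize that the session cache now requires.
fn session_cache_lru_size(dispute_window: NonZeroU32) -> NonZeroUsize {
    NonZeroUsize::new(dispute_window.get() as usize)
        .expect("Dispute window can not be 0; qed")
}

fn main() {
    let window = NonZeroU32::new(6).expect("6 is non-zero");
    assert_eq!(session_cache_lru_size(window).get(), 6);
}
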
13 changes: 10 additions & 3 deletions node/network/dispute-distribution/src/receiver/mod.rs
@@ -16,6 +16,7 @@
 
 use std::{
     collections::HashSet,
+    num::NonZeroUsize,
     pin::Pin,
     task::{Context, Poll},
 };
@@ -61,6 +62,11 @@ const COST_NOT_A_VALIDATOR: Rep = Rep::CostMajor("Reporting peer was not a valid
 /// How many statement imports we want to issue in parallel:
 pub const MAX_PARALLEL_IMPORTS: usize = 10;
 
+const BANNED_PEERS_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(MAX_PARALLEL_IMPORTS) {
+    Some(cap) => cap,
+    None => panic!("Banned peers cache size should not be 0."),
+};
+
 /// State for handling incoming `DisputeRequest` messages.
 ///
 /// This is supposed to run as its own task in order to easily impose back pressure on the incoming
@@ -146,7 +152,8 @@ where
     ) -> Self {
         let runtime = RuntimeInfo::new_with_config(runtime::Config {
             keystore: None,
-            session_cache_lru_size: DISPUTE_WINDOW.get() as usize,
+            session_cache_lru_size: NonZeroUsize::new(DISPUTE_WINDOW.get() as usize)
+                .expect("Dispute window can not be 0; qed"),
         });
         Self {
             runtime,
@@ -156,7 +163,7 @@
             pending_imports: PendingImports::new(),
             // Size of MAX_PARALLEL_IMPORTS ensures we are going to immediately get rid of any
             // malicious requests still pending in the incoming queue.
-            banned_peers: LruCache::new(MAX_PARALLEL_IMPORTS),
+            banned_peers: LruCache::new(BANNED_PEERS_CACHE_SIZE),
             metrics,
         }
     }
@@ -222,7 +229,7 @@ where
         }
 
         // Wait for a free slot:
-        if self.pending_imports.len() >= MAX_PARALLEL_IMPORTS as usize {
+        if self.pending_imports.len() >= MAX_PARALLEL_IMPORTS {
            // Wait for one to finish:
            let r = self.pending_imports.next().await;
            self.ban_bad_peer(r.expect("pending_imports.len() is greater 0. qed."))?;
2 changes: 1 addition & 1 deletion node/overseer/Cargo.toml
@@ -17,7 +17,7 @@ polkadot-node-metrics = { path = "../metrics" }
 polkadot-primitives = { path = "../../primitives" }
 orchestra = "0.0.2"
 gum = { package = "tracing-gum", path = "../gum" }
-lru = "0.7"
+lru = "0.8"
 parity-util-mem = { version = "0.11.0", default-features = false }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 async-trait = "0.1.57"
6 changes: 5 additions & 1 deletion node/overseer/src/lib.rs
@@ -62,6 +62,7 @@
 use std::{
     collections::{hash_map, HashMap},
     fmt::{self, Debug},
+    num::NonZeroUsize,
     pin::Pin,
     sync::Arc,
     time::Duration,
@@ -112,7 +113,10 @@ pub use orchestra::{
 
 /// Store 2 days worth of blocks, not accounting for forks,
 /// in the LRU cache. Assumes a 6-second block time.
-pub const KNOWN_LEAVES_CACHE_SIZE: usize = 2 * 24 * 3600 / 6;
+pub const KNOWN_LEAVES_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(2 * 24 * 3600 / 6) {
+    Some(cap) => cap,
+    None => panic!("Known leaves cache size must be non-zero"),
+};
 
 #[cfg(test)]
 mod tests;
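
For reference, the arithmetic behind the constant: two days of blocks at one block every 6 seconds is 2 × 24 × 3600 / 6 = 28,800 cache entries. A one-line compile-time check of that figure (standalone, not importing the overseer crate):

// Two days of 6-second blocks:
const _: () = assert!(2 * 24 * 3600 / 6 == 28_800);

fn main() {}
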
2 changes: 1 addition & 1 deletion node/service/Cargo.toml
@@ -73,7 +73,7 @@ kvdb = "0.11.0"
 kvdb-rocksdb = { version = "0.15.2", optional = true }
 parity-db = { version = "0.3.16", optional = true }
 async-trait = "0.1.57"
-lru = "0.7"
+lru = "0.8"
 
 # Polkadot
 polkadot-node-core-parachains-inherent = { path = "../core/parachains-inherent" }
2 changes: 1 addition & 1 deletion node/subsystem-util/Cargo.toml
@@ -17,7 +17,7 @@ thiserror = "1.0.31"
 fatality = "0.0.6"
 gum = { package = "tracing-gum", path = "../gum" }
 derive_more = "0.99.17"
-lru = "0.7.7"
+lru = "0.8.0"
 
 polkadot-node-subsystem = {path = "../subsystem" }
 polkadot-node-jaeger = { path = "../jaeger" }
11 changes: 7 additions & 4 deletions node/subsystem-util/src/runtime/mod.rs
@@ -16,7 +16,7 @@
 
 //! Convenient interface to runtime information.
 
-use std::cmp::max;
+use std::num::NonZeroUsize;
 
 use lru::LruCache;
 
@@ -52,7 +52,7 @@ pub struct Config {
     pub keystore: Option<SyncCryptoStorePtr>,
 
     /// How many sessions should we keep in the cache?
-    pub session_cache_lru_size: usize,
+    pub session_cache_lru_size: NonZeroUsize,
 }
 
 /// Caching of session info.
@@ -95,7 +95,7 @@ impl Default for Config {
         Self {
             keystore: None,
             // Usually we need to cache the current and the last session.
-            session_cache_lru_size: 2,
+            session_cache_lru_size: NonZeroUsize::new(2).expect("2 is larger than 0; qed"),
         }
     }
 }
@@ -109,7 +109,10 @@ impl RuntimeInfo {
     /// Create with more elaborate configuration options.
     pub fn new_with_config(cfg: Config) -> Self {
         Self {
-            session_index_cache: LruCache::new(max(10, cfg.session_cache_lru_size)),
+            session_index_cache: LruCache::new(
+                cfg.session_cache_lru_size
+                    .max(NonZeroUsize::new(10).expect("10 is larger than 0; qed")),
+            ),
             session_info_cache: LruCache::new(cfg.session_cache_lru_size),
             keystore: cfg.keystore,
         }
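
The former `std::cmp::max(10, ..)` becomes `Ord::max` called directly on the `NonZeroUsize`, keeping the existing floor of 10 entries for the session index cache. A hypothetical standalone version of that clamp:

use std::num::NonZeroUsize;

// Hypothetical helper mirroring `new_with_config`: never size the session
// index cache below 10 entries, whatever the configuration asks for.
fn session_index_cache_capacity(configured: NonZeroUsize) -> NonZeroUsize {
    configured.max(NonZeroUsize::new(10).expect("10 is larger than 0; qed"))
}

fn main() {
    let small = NonZeroUsize::new(2).expect("2 is non-zero");
    let large = NonZeroUsize::new(64).expect("64 is non-zero");
    assert_eq!(session_index_cache_capacity(small).get(), 10);
    assert_eq!(session_index_cache_capacity(large).get(), 64);
}
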