diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index 89652d7927b..52c6687e328 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -315,7 +315,7 @@ impl Error { } /// Some blockchain errors are reported in the prometheus metrics. In such cases a report might - /// contain a label that specifies the type of error that has occured. For example when the node + /// contain a label that specifies the type of error that has occurred. For example when the node /// receives a block with an invalid signature this would be reported as: /// `near_num_invalid_blocks{error="invalid_signature"}`. /// This function returns the value of the error label for a specific instance of Error. diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 5ae2862a833..3bf73c0f791 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -4459,7 +4459,7 @@ pub struct BlockCatchUpResponse { /// 3. We've got response from sync jobs actor that block was processed. Block hash, state /// changes from preprocessing and result of processing block are moved to processed blocks /// 4. Results are postprocessed. If there is any error block goes back to pending to try again. 
-/// Otherwise results are commited, block is moved to done blocks and any blocks that +/// Otherwise results are committed, block is moved to done blocks and any blocks that /// have this block as previous are added to pending pub struct BlocksCatchUpState { /// Hash of first block of an epoch diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs index b2db9a6096f..b69fc666929 100644 --- a/chain/chunks/src/lib.rs +++ b/chain/chunks/src/lib.rs @@ -1182,7 +1182,7 @@ impl ShardsManager { let parts = forward.parts.into_iter().filter_map(|part| { let part_ord = part.part_ord; if part_ord > num_total_parts { - warn!(target: "chunks", "Received chunk part with part_ord greater than the the total number of chunks"); + warn!(target: "chunks", "Received chunk part with part_ord greater than the total number of chunks"); None } else { Some((part_ord, part)) @@ -1195,7 +1195,7 @@ impl ShardsManager { for part in forward.parts { let part_ord = part.part_ord; if part_ord > num_total_parts { - warn!(target: "chunks", "Received chunk part with part_ord greater than the the total number of chunks"); + warn!(target: "chunks", "Received chunk part with part_ord greater than the total number of chunks"); continue; } existing_parts.insert(part_ord, part); @@ -2567,7 +2567,7 @@ mod test { #[test] // Test that when a validator receives a chunk forward before the chunk header, and that the - // chunk header first arrives as part of a block, it should store the the forward and use it + // chunk header first arrives as part of a block, it should store the forward and use it // when it receives the header. 
fn test_receive_forward_before_chunk_header_from_block() { let fixture = ChunkTestFixture::default(); diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index a806ca12f3e..58250ed9f49 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -217,7 +217,7 @@ pub struct BlockDebugStatus { // Chunk statuses are below: // We first sent the request to fetch the chunk // Later we get the response from the peer and we try to reconstruct it. - // If reconstructions suceeds, the chunk will be marked as complete. + // If reconstruction succeeds, the chunk will be marked as complete. // If it fails (or fragments are missing) - we're going to re-request the chunk again. // Chunks that we reqeusted (sent the request to peers). @@ -2595,7 +2595,7 @@ impl Client { tracing::debug_span!(target: "client", "get_tier1_accounts(): recomputing").entered(); // What we really need are: chunk producers, block producers and block approvers for - // this epoch and the beginnig of the next epoch (so that all required connections are + // this epoch and the beginning of the next epoch (so that all required connections are // established in advance). Note that block producers and block approvers are not // exactly the same - last blocks of this epoch will also need to be signed by the // block producers of the next epoch. On the other hand, block approvers diff --git a/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs b/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs index 1f333386188..74415e1c52c 100644 --- a/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs +++ b/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs @@ -171,7 +171,7 @@ impl ChunkEndorsementTracker { )?; // Get the chunk_endorsements for the chunk from our cache. // Note that these chunk endorsements are already validated as part of process_chunk_endorsement. 
- // We can safely rely on the the following details + // We can safely rely on the following details // 1. The chunk endorsements are from valid chunk_validator for this chunk. // 2. The chunk endorsements signatures are valid. let Some(chunk_endorsements) = self.chunk_endorsements.get(&chunk_header.chunk_hash()) diff --git a/chain/client/src/stateless_validation/processing_tracker.rs b/chain/client/src/stateless_validation/processing_tracker.rs index 9b0da813a16..aad8cd8a7f3 100644 --- a/chain/client/src/stateless_validation/processing_tracker.rs +++ b/chain/client/src/stateless_validation/processing_tracker.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use once_cell::sync::OnceCell; -/// `ProcessingDoneTracker` can be used in conjuction with a `ProcessingDoneWaiter` +/// `ProcessingDoneTracker` can be used in conjunction with a `ProcessingDoneWaiter` /// to wait until some processing is finished. `ProcessingDoneTracker` should be /// kept alive as long as the processing is ongoing, then once it's dropped, /// the paired `ProcessingDoneWaiter` will be notified that the processing has finished. diff --git a/chain/client/src/sync/block.rs b/chain/client/src/sync/block.rs index d00468c6e84..e4d3fd8e9bf 100644 --- a/chain/client/src/sync/block.rs +++ b/chain/client/src/sync/block.rs @@ -213,7 +213,7 @@ impl BlockSync { let mut num_requests = 0; for (height, hash) in requests { let request_from_archival = self.archive && height < gc_stop_height; - // Assume that heads of `highest_height_peers` are are ahead of the blocks we're requesting. + // Assume that heads of `highest_height_peers` are ahead of the blocks we're requesting. let peer = if request_from_archival { // Normal peers are unlikely to have old blocks, request from an archival node. 
let archival_peer_iter = highest_height_peers.iter().filter(|p| p.archival); diff --git a/chain/epoch-manager/src/validator_selection.rs b/chain/epoch-manager/src/validator_selection.rs index 3d723654440..27d00170e9d 100644 --- a/chain/epoch-manager/src/validator_selection.rs +++ b/chain/epoch-manager/src/validator_selection.rs @@ -723,7 +723,7 @@ mod tests { fn test_validator_assignment_ratio_condition() { // There are more seats than proposals, however the // lower proposals are too small relative to the total - // (the reason we can't choose them is because the the probability of them actually + // (the reason we can't choose them is because the probability of them actually // being selected to make a block would be too low since it is done in // proportion to stake). let epoch_config = create_epoch_config( diff --git a/chain/indexer/src/streamer/fetchers.rs b/chain/indexer/src/streamer/fetchers.rs index ace1d2f9985..31a40d7c908 100644 --- a/chain/indexer/src/streamer/fetchers.rs +++ b/chain/indexer/src/streamer/fetchers.rs @@ -1,5 +1,5 @@ //! Streamer watches the network and collects all the blocks and related chunks -//! into one struct and pushes in in to the given queue +//! into one struct and pushes it into the given queue use std::collections::HashMap; use actix::Addr; diff --git a/chain/network/src/peer_manager/tests/snapshot_hosts.rs b/chain/network/src/peer_manager/tests/snapshot_hosts.rs index ee1b7d1fa65..1ea2afbfcba 100644 --- a/chain/network/src/peer_manager/tests/snapshot_hosts.rs +++ b/chain/network/src/peer_manager/tests/snapshot_hosts.rs @@ -185,7 +185,7 @@ async fn invalid_signature_not_broadcast() { let empty_sync_msg = peer3.events.recv_until(take_sync_snapshot_msg).await; assert_eq!(empty_sync_msg.hosts, vec![]); - tracing::info!(target:"test", "Send an invalid SyncSnapshotHosts message from from peer1. One of the host infos has an invalid signature."); + tracing::info!(target:"test", "Send an invalid SyncSnapshotHosts message from peer1. 
One of the host infos has an invalid signature."); let random_secret_key = SecretKey::from_random(near_crypto::KeyType::ED25519); let invalid_info = make_snapshot_host_info(&peer1_config.node_id(), &random_secret_key, rng); @@ -249,7 +249,7 @@ async fn too_many_shards_not_broadcast() { let empty_sync_msg = peer3.events.recv_until(take_sync_snapshot_msg).await; assert_eq!(empty_sync_msg.hosts, vec![]); - tracing::info!(target:"test", "Send an invalid SyncSnapshotHosts message from from peer1. One of the host infos has more shard ids than allowed."); + tracing::info!(target:"test", "Send an invalid SyncSnapshotHosts message from peer1. One of the host infos has more shard ids than allowed."); let too_many_shards: Vec = (0..(MAX_SHARDS_PER_SNAPSHOT_HOST_INFO as u64 + 1)).collect(); let invalid_info = Arc::new(SnapshotHostInfo::new( diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs index 3ebcef073ab..31dee91f93c 100644 --- a/chain/network/src/types.rs +++ b/chain/network/src/types.rs @@ -461,7 +461,7 @@ mod tests { /// case fall back to sending to the account. /// Otherwise, send to the account, unless we do not know the route, in which case send to the peer. pub struct AccountIdOrPeerTrackingShard { - /// Target account to send the the request to + /// Target account to send the request to pub account_id: Option, /// Whether to check peers first or target account first pub prefer_peer: bool, diff --git a/core/parameters/res/README.md b/core/parameters/res/README.md index 02900c5e334..f5efd6232fb 100644 --- a/core/parameters/res/README.md +++ b/core/parameters/res/README.md @@ -21,5 +21,5 @@ versions which generally means the default value is used to fill in the `RuntimeConfig` object. The latest values of parameters can be found in `parameters.snap`. 
This file is -automatically generated by tests and needs to be reviewed and commited whenever +automatically generated by tests and needs to be reviewed and committed whenever any of the parameters changes. diff --git a/core/primitives/src/network.rs b/core/primitives/src/network.rs index 2a40e482454..d670be8bef6 100644 --- a/core/primitives/src/network.rs +++ b/core/primitives/src/network.rs @@ -64,7 +64,7 @@ pub struct AnnounceAccount { impl AnnounceAccount { /// We hash only (account_id, peer_id, epoch_id). There is no need hash the signature - /// as it's uniquely determined the the triple. + /// as it's uniquely determined by the triple. pub fn build_header_hash( account_id: &AccountId, peer_id: &PeerId, diff --git a/core/primitives/src/shard_layout.rs b/core/primitives/src/shard_layout.rs index 3b5129d81d2..709b12fb349 100644 --- a/core/primitives/src/shard_layout.rs +++ b/core/primitives/src/shard_layout.rs @@ -76,7 +76,7 @@ type ShardSplitMap = Vec>; pub struct ShardLayoutV1 { /// The boundary accounts are the accounts on boundaries between shards. /// Each shard contains a range of accounts from one boundary account to - /// another - or the the smallest or largest account possible. The total + /// another - or the smallest or largest account possible. The total /// number of shards is equal to the number of boundary accounts plus 1. boundary_accounts: Vec, /// Maps shards from the last shard layout to shards that it splits to in this shard layout, diff --git a/core/store/src/db/colddb.rs b/core/store/src/db/colddb.rs index 1ce41f3cd10..ab238bd8048 100644 --- a/core/store/src/db/colddb.rs +++ b/core/store/src/db/colddb.rs @@ -26,7 +26,7 @@ impl ColdDB { format!("Reading from column missing from cold storage. {col:?}") } - // Checks if the column is is the cold db and returns an error if not. + // Checks if the column is in the cold db and returns an error if not. 
fn check_is_in_colddb(col: DBCol) -> std::io::Result<()> { if !col.is_in_colddb() { return Err(std::io::Error::other(Self::err_msg(col))); } @@ -34,7 +34,7 @@ impl ColdDB { Ok(()) } - // Checks if the column is is the cold db and panics if not. + // Checks if the column is in the cold db and panics if not. fn log_assert_is_in_colddb(col: DBCol) { log_assert!(col.is_in_colddb(), "{}", Self::err_msg(col)); } diff --git a/core/store/src/db/splitdb.rs b/core/store/src/db/splitdb.rs index a639d79a17c..75842a4ce19 100644 --- a/core/store/src/db/splitdb.rs +++ b/core/store/src/db/splitdb.rs @@ -255,7 +255,7 @@ mod test { let cold = create_cold(); let split = SplitDB::new(hot.clone(), cold.clone()); - // Block is a nice column for testing because is is a cold column but + // Block is a nice column for testing because it is a cold column but // cold doesn't do anything funny to it. let col = DBCol::Block; diff --git a/core/store/src/flat/store_helper.rs b/core/store/src/flat/store_helper.rs index 3c4510634db..ea5b1c6f0da 100644 --- a/core/store/src/flat/store_helper.rs +++ b/core/store/src/flat/store_helper.rs @@ -232,7 +232,7 @@ pub fn set_flat_storage_status( /// Returns iterator over flat storage entries for a given shard and range of /// state keys. `None` means that there is no bound in respective direction. /// It reads data only from `FlatState` column which represents the state at -/// flat storage head. Reads only commited changes. +/// flat storage head. Reads only committed changes. 
pub fn iter_flat_state_entries<'a>( shard_uid: ShardUId, store: &'a Store, diff --git a/core/store/src/trie/mod.rs b/core/store/src/trie/mod.rs index 50219c409b8..6ab766cdc7f 100644 --- a/core/store/src/trie/mod.rs +++ b/core/store/src/trie/mod.rs @@ -1476,7 +1476,7 @@ impl Trie { { match &self.memtries { Some(memtries) => { - // If we have in-memory tries, use it to construct the the changes entirely (for + // If we have in-memory tries, use it to construct the changes entirely (for // both in-memory and on-disk updates) because it's much faster. let guard = memtries.read().unwrap(); let mut trie_update = guard.update(self.root, true)?; diff --git a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs index faa5bf9cf2d..c860eff6428 100644 --- a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs +++ b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs @@ -304,7 +304,7 @@ fn test_transaction_nonce_too_large() { /// the requirement that the block must be in the same epoch as the next block after its accepted ancestor /// - test1 processes partial chunk responses for block 8 and 9 /// - check that test1 sends missing chunk requests for block 11 to 10+NUM_ORPHAN_ANCESTORS+CHECK, -/// since now they satisfy the the requirements for requesting chunks for orphans +/// since now they satisfy the requirements for requesting chunks for orphans /// - process the rest of blocks #[test] fn test_request_chunks_for_orphan() { diff --git a/pytest/lib/data.py b/pytest/lib/data.py index 1346b79838d..b0d03ea5ffc 100644 --- a/pytest/lib/data.py +++ b/pytest/lib/data.py @@ -41,7 +41,7 @@ def linear_regression(xs, ys): def compute_rate(timestamps): ''' Given a list of timestamps indicating the times - some event occured, returns the average rate at + some event occurred, returns the average rate at 
which the events happen. If the units of the timestamps are seconds, then the output units will be `events/s`. diff --git a/pytest/tests/sanity/rpc_finality.py b/pytest/tests/sanity/rpc_finality.py index 44c4284b567..3b21f0d6fcb 100644 --- a/pytest/tests/sanity/rpc_finality.py +++ b/pytest/tests/sanity/rpc_finality.py @@ -58,7 +58,7 @@ def test_finality(self): latest_block_hash) logger.info("About to send payment") # this transaction will be added to the block (probably around block 5) - # and the the receipts & transfers will happen in the next block (block 6). + # and the receipts & transfers will happen in the next block (block 6). # This function should return as soon as block 6 arrives in node0. logger.info(nodes[0].send_tx_rpc(tx, wait_until='INCLUDED', timeout=10)) logger.info("Done") diff --git a/runtime/near-vm-runner/src/instrument/stack_height/mod.rs b/runtime/near-vm-runner/src/instrument/stack_height/mod.rs index 434a33b26da..a0da4859330 100644 --- a/runtime/near-vm-runner/src/instrument/stack_height/mod.rs +++ b/runtime/near-vm-runner/src/instrument/stack_height/mod.rs @@ -86,7 +86,7 @@ macro_rules! instrument_call { mod max_height; mod thunk; -/// Error that occured during processing the module. +/// Error that occurred during processing the module. /// /// This means that the module is invalid. #[derive(Debug)] diff --git a/runtime/near-vm/compiler/src/error.rs b/runtime/near-vm/compiler/src/error.rs index 2572312baf0..1f5359d5278 100644 --- a/runtime/near-vm/compiler/src/error.rs +++ b/runtime/near-vm/compiler/src/error.rs @@ -10,11 +10,11 @@ use crate::lib::std::string::String; /// [compiler-error]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/CompileError #[derive(Debug, thiserror::Error)] pub enum CompileError { - /// A Wasm translation error occured. + /// A Wasm translation error occurred. #[error("WebAssembly translation error: {0}")] Wasm(WasmError), - /// A compilation error occured. 
+ /// A compilation error occurred. #[error("Compilation error: {0}")] Codegen(String), diff --git a/runtime/near-vm/engine/src/error.rs b/runtime/near-vm/engine/src/error.rs index ebb498d2504..5145222e499 100644 --- a/runtime/near-vm/engine/src/error.rs +++ b/runtime/near-vm/engine/src/error.rs @@ -84,7 +84,7 @@ pub enum InstantiationError { #[error("module compiled with CPU feature that is missing from host")] CpuFeature(String), - /// A runtime error occured while invoking the start function + /// A runtime error occurred while invoking the start function #[error(transparent)] Start(RuntimeError), } diff --git a/runtime/near-vm/test-api/src/sys/instance.rs b/runtime/near-vm/test-api/src/sys/instance.rs index 1df6969d9d1..7bc779f7cb6 100644 --- a/runtime/near-vm/test-api/src/sys/instance.rs +++ b/runtime/near-vm/test-api/src/sys/instance.rs @@ -38,7 +38,7 @@ pub enum InstantiationError { #[error(transparent)] Link(LinkError), - /// A runtime error occured while invoking the start function + /// A runtime error occurred while invoking the start function #[error("could not invoke the start function: {0}")] Start(RuntimeError), diff --git a/runtime/near-vm/wast/src/error.rs b/runtime/near-vm/wast/src/error.rs index 53f0ab6d3c5..3c052aad198 100644 --- a/runtime/near-vm/wast/src/error.rs +++ b/runtime/near-vm/wast/src/error.rs @@ -15,7 +15,7 @@ pub struct DirectiveError { /// A structure holding the list of all executed directives #[derive(Error, Debug)] pub struct DirectiveErrors { - /// The filename where the error occured + /// The filename where the error occurred pub filename: String, /// The list of errors pub errors: Vec, diff --git a/runtime/runtime-params-estimator/README.md b/runtime/runtime-params-estimator/README.md index 83a37ec443a..4ac0232807c 100644 --- a/runtime/runtime-params-estimator/README.md +++ b/runtime/runtime-params-estimator/README.md @@ -91,7 +91,7 @@ done ``` When running these command, make sure to run with `sequential` and to disable 
-prefetching is disabled, or else the the replaying modes that match requests to +prefetching, or else the replaying modes that match requests to receipts will not work properly. ```js diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs index 03d542fbb60..0bfea5a87a3 100644 --- a/runtime/runtime/src/lib.rs +++ b/runtime/runtime/src/lib.rs @@ -868,7 +868,7 @@ impl Runtime { gas_balance_refund = 0; } } else { - // Refund for the difference of the purchased gas price and the the current gas price. + // Refund for the difference of the purchased gas price and the current gas price. gas_balance_refund = safe_add_balance( gas_balance_refund, safe_gas_to_balance( diff --git a/tools/themis/src/rules.rs b/tools/themis/src/rules.rs index a3adccfc5b7..aa4ed461a95 100644 --- a/tools/themis/src/rules.rs +++ b/tools/themis/src/rules.rs @@ -256,7 +256,7 @@ pub fn publishable_has_license_file(workspace: &Workspace) -> anyhow::Result<()> const EXPECTED_LICENSE: &str = "MIT OR Apache-2.0"; -/// Ensure all non-private crates use the the same expected license +/// Ensure all non-private crates use the same expected license pub fn publishable_has_unified_license(workspace: &Workspace) -> anyhow::Result<()> { let outliers = workspace .members diff --git a/utils/near-cache/src/cell.rs b/utils/near-cache/src/cell.rs index d6f29bfe48c..783a0b6fc5f 100644 --- a/utils/near-cache/src/cell.rs +++ b/utils/near-cache/src/cell.rs @@ -19,7 +19,7 @@ where Self { inner: RefCell::new(LruCache::::new(cap)) } } - /// Returns the number of key-value pairs that are currently in the the cache. + /// Returns the number of key-value pairs that are currently in the cache. 
pub fn len(&self) -> usize { self.inner.borrow().len() } diff --git a/utils/near-cache/src/sync.rs b/utils/near-cache/src/sync.rs index 9728293c573..44b5cea1aed 100644 --- a/utils/near-cache/src/sync.rs +++ b/utils/near-cache/src/sync.rs @@ -19,7 +19,7 @@ where Self { inner: Mutex::new(LruCache::::new(cap)) } } - /// Returns the number of key-value pairs that are currently in the the cache. + /// Returns the number of key-value pairs that are currently in the cache. pub fn len(&self) -> usize { self.inner.lock().unwrap().len() }