diff --git a/api/src/config.rs b/api/src/config.rs
index 7a40e0e4211..b2f93bca75e 100644
--- a/api/src/config.rs
+++ b/api/src/config.rs
@@ -2299,7 +2299,7 @@ mod tests {
     }
 
     #[test]
-    fn test_get_confg() {
+    fn test_get_config() {
         get_config("localdisk");
         get_config("localfs");
         get_config("oss");
@@ -2579,7 +2579,7 @@ mod tests {
     }
 
     #[test]
-    fn test_bckend_config_try_from() {
+    fn test_backend_config_try_from() {
         let config = BackendConfig {
             backend_type: "localdisk".to_string(),
             backend_config: serde_json::to_value(LocalDiskConfig::default()).unwrap(),
diff --git a/builder/src/compact.rs b/builder/src/compact.rs
index 743c7d669da..3ff27eeac69 100644
--- a/builder/src/compact.rs
+++ b/builder/src/compact.rs
@@ -53,7 +53,7 @@ pub struct Config {
     /// we compact blobs whose size are less than compact_blob_size
     #[serde(default = "default_compact_blob_size")]
     compact_blob_size: usize,
-    /// size of compacted blobs should not be large than max_compact_size
+    /// size of compacted blobs should not be larger than max_compact_size
     #[serde(default = "default_max_compact_size")]
     max_compact_size: usize,
     /// if number of blobs >= layers_to_compact, do compact
@@ -642,7 +642,7 @@ impl BlobCompactor {
             return Ok(None);
         }
 
-        info!("compatctor: successfully compacted blob");
+        info!("compactor: successfully compacted blob");
         // blobs have already been dumped, dump bootstrap only
         let blob_table = compactor.new_blob_mgr.to_blob_table(&build_ctx)?;
         bootstrap.build(&mut build_ctx, &mut bootstrap_ctx)?;
diff --git a/builder/src/merge.rs b/builder/src/merge.rs
index 917c8860159..25ba20c07e9 100644
--- a/builder/src/merge.rs
+++ b/builder/src/merge.rs
@@ -409,7 +409,7 @@ mod tests {
         );
         assert!(build_output.is_ok());
         let build_output = build_output.unwrap();
-        println!("BuildOutpu: {}", build_output);
+        println!("BuildOutput: {}", build_output);
         assert_eq!(build_output.blob_size, Some(16));
     }
 }
diff --git a/rafs/src/blobfs/mod.rs b/rafs/src/blobfs/mod.rs
index d88ab67748f..fa5a3c19047 100644
--- a/rafs/src/blobfs/mod.rs
+++ b/rafs/src/blobfs/mod.rs
@@ -167,7 +167,7 @@ impl BlobFs {
     fn load_bootstrap(cfg: &Config) -> io::Result {
         let blob_ondemand_conf = BlobOndemandConfig::from_str(&cfg.blob_ondemand_cfg)?;
         if !blob_ondemand_conf.rafs_conf.validate() {
-            return Err(einval!("blobfs: invlidate configuration for blobfs"));
+            return Err(einval!("blobfs: invalid configuration for blobfs"));
         }
         let rafs_cfg = blob_ondemand_conf.rafs_conf.get_rafs_config()?;
         if rafs_cfg.mode != "direct" {
diff --git a/rafs/src/fs.rs b/rafs/src/fs.rs
index a3119477c3d..280b946516b 100644
--- a/rafs/src/fs.rs
+++ b/rafs/src/fs.rs
@@ -434,7 +434,7 @@ impl Rafs {
         }
 
         // Perform different policy for v5 format and v6 format as rafs v6's blobs are capable to
-        // to download chunks and decompress them all by themselves. For rafs v6, directly perform
+        // download chunks and decompress them all by themselves. For rafs v6, directly perform
         // chunk based full prefetch
         if !ignore_prefetch_all && (inlay_prefetch_all || prefetch_all || startup_prefetch_all) {
             if sb.meta.is_v6() {
diff --git a/rafs/src/metadata/mod.rs b/rafs/src/metadata/mod.rs
index 9136bbbed66..27f20fb448f 100644
--- a/rafs/src/metadata/mod.rs
+++ b/rafs/src/metadata/mod.rs
@@ -792,7 +792,7 @@ impl RafsSuper {
         // Backward compatibility: try to fix blob id for old converters.
         // Old converters extracts bootstraps from data blobs with inlined bootstrap
         // use blob digest as the bootstrap file name. The last blob in the blob table from
-        // the bootstrap has wrong blod id, so we need to fix it.
+        // the bootstrap has wrong blob id, so we need to fix it.
         let blobs = rs.superblock.get_blob_infos();
         for blob in blobs.iter() {
             // Fix blob id for new images with old converters.
diff --git a/service/src/block_nbd.rs b/service/src/block_nbd.rs
index 9e71fb4de75..4bb6a746d72 100644
--- a/service/src/block_nbd.rs
+++ b/service/src/block_nbd.rs
@@ -105,7 +105,7 @@ impl NbdService {
         })
     }
 
-    /// Create a [NbdWoker] to run the event loop to handle NBD requests from kernel.
+    /// Create a [NbdWorker] to run the event loop to handle NBD requests from kernel.
     pub fn create_worker(&self) -> Result {
         // Let the NBD driver go.
         let (sock1, sock2) = std::os::unix::net::UnixStream::pair()?;
diff --git a/service/src/fs_service.rs b/service/src/fs_service.rs
index b37128e6f14..f260cf364ad 100644
--- a/service/src/fs_service.rs
+++ b/service/src/fs_service.rs
@@ -222,7 +222,7 @@ pub trait FsService: Send + Sync {
 /// Validate prefetch file list from user input.
 ///
 /// Validation rules:
-/// - an item may be file or directroy.
+/// - an item may be file or directory.
 /// - items must be separated by space, such as " ".
 /// - each item must be absolute path, such as "/foo1/bar1 /foo2/bar2".
 fn validate_prefetch_file_list(input: &Option>) -> Result>> {
diff --git a/src/bin/nydus-image/inspect.rs b/src/bin/nydus-image/inspect.rs
index 06af8cbaecf..0a0e720f72d 100644
--- a/src/bin/nydus-image/inspect.rs
+++ b/src/bin/nydus-image/inspect.rs
@@ -204,7 +204,7 @@ impl RafsInspector {
         }
 
         // Walk through children inodes to find the file
-        // Print its basic information and all chunk infomation
+        // Print its basic information and all chunk information
         let dir_inode = self.rafs_meta.get_extended_inode(self.cur_dir_ino, false)?;
         dir_inode.walk_children_inodes(0, &mut |_inode, child_name, child_ino, _offset| {
             if child_name == file_name {
diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs
index 1b9a89037f8..de4e1a7e276 100644
--- a/src/bin/nydus-image/main.rs
+++ b/src/bin/nydus-image/main.rs
@@ -832,9 +832,9 @@ impl Command {
         let chunk_size = Self::get_chunk_size(matches, conversion_type)?;
         let batch_size = Self::get_batch_size(matches, version, conversion_type, chunk_size)?;
         let blob_cache_storage = Self::get_blob_cache_storage(matches, conversion_type)?;
-        // blob-cacher-dir and blob-dir/blob are a set of mutually exclusive functions,
+        // blob-cache-dir and blob-dir/blob are a set of mutually exclusive functions,
         // the former is used to generate blob cache, nydusd is directly started through blob cache,
-        // the latter is to generate nydus blob, as nydud backend to start
+        // the latter is to generate nydus blob, as nydusd backend to start
         let blob_storage = if blob_cache_storage.is_some() {
             None
         } else {
diff --git a/src/bin/nydusd/api_server_glue.rs b/src/bin/nydusd/api_server_glue.rs
index b1cd72f7c8f..bd1b6510dfa 100644
--- a/src/bin/nydusd/api_server_glue.rs
+++ b/src/bin/nydusd/api_server_glue.rs
@@ -118,7 +118,7 @@ impl ApiServer {
     }
 
     /// External supervisor wants this instance to fetch `/dev/fuse` fd. Before
-    /// invoking this method, supervisor should already listens on a Unix socket and
+    /// invoking this method, supervisor should already listen on a Unix socket and
    /// waits for connection from this instance. Then supervisor should send the *fd*
    /// back. Note, the http response does not mean this process already finishes Takeover
    /// procedure. Supervisor has to continuously query the state of Nydusd until it gets
diff --git a/src/logger.rs b/src/logger.rs
index e78a1d91caa..b82dc686b6d 100644
--- a/src/logger.rs
+++ b/src/logger.rs
@@ -117,7 +117,7 @@ pub fn setup_logging(
         })?;
         spec = spec.basename(basename);
 
-        // `flexi_logger` automatically add `.log` suffix if the file name has not extension.
+        // `flexi_logger` automatically adds `.log` suffix if the file name has no extension.
         if let Some(suffix) = path.extension() {
             let suffix = suffix.to_str().ok_or_else(|| {
                 eprintln!("invalid file extension {:?}", suffix);
diff --git a/storage/src/backend/mod.rs b/storage/src/backend/mod.rs
index aec8db9de06..155fcecf8d5 100644
--- a/storage/src/backend/mod.rs
+++ b/storage/src/backend/mod.rs
@@ -213,7 +213,7 @@ pub trait BlobBackend: Send + Sync {
     /// Get metrics object.
     fn metrics(&self) -> &BackendMetrics;
 
-    /// Get a blob reader object to access blod `blob_id`.
+    /// Get a blob reader object to access blob `blob_id`.
     fn get_reader(&self, blob_id: &str) -> BackendResult>;
 }
 
diff --git a/storage/src/backend/registry.rs b/storage/src/backend/registry.rs
index a9947f49efb..4e1ac4a9035 100644
--- a/storage/src/backend/registry.rs
+++ b/storage/src/backend/registry.rs
@@ -299,7 +299,7 @@ impl RegistryState {
         form.insert("scope".to_string(), auth.scope.clone());
         form.insert("grant_type".to_string(), "password".to_string());
         form.insert("username".to_string(), self.username.clone());
-        form.insert("passward".to_string(), self.password.clone());
+        form.insert("password".to_string(), self.password.clone());
         form.insert("client_id".to_string(), REGISTRY_CLIENT_ID.to_string());
 
         let token_resp = connection
@@ -1089,7 +1089,7 @@ mod tests {
                 assert_eq!(&auth.service, "my-registry.com");
                 assert_eq!(&auth.scope, "repository:test/repo:pull,push");
             }
-            _ => panic!("failed to pase `Bearer` authentication header"),
+            _ => panic!("failed to parse `Bearer` authentication header"),
         }
 
         let str = "Basic realm=\"https://auth.my-registry.com/token\"";
@@ -1097,7 +1097,7 @@ mod tests {
         let auth = RegistryState::parse_auth(&header).unwrap();
         match auth {
             Auth::Basic(auth) => assert_eq!(&auth.realm, "https://auth.my-registry.com/token"),
-            _ => panic!("failed to pase `Bearer` authentication header"),
+            _ => panic!("failed to parse `Bearer` authentication header"),
         }
 
         let str = "Base realm=\"https://auth.my-registry.com/token\"";
diff --git a/storage/src/cache/dummycache.rs b/storage/src/cache/dummycache.rs
index 7a0465f36e2..3fc4fcf9c72 100644
--- a/storage/src/cache/dummycache.rs
+++ b/storage/src/cache/dummycache.rs
@@ -180,7 +180,7 @@ pub struct DummyCacheMgr {
 }
 
 impl DummyCacheMgr {
-    /// Create a new instance of `DummmyCacheMgr`.
+    /// Create a new instance of `DummyCacheMgr`.
     pub fn new(
         config: &CacheConfigV2,
         backend: Arc,
diff --git a/storage/src/cache/state/digested_chunk_map.rs b/storage/src/cache/state/digested_chunk_map.rs
index 887bf2d9554..30df1e147af 100644
--- a/storage/src/cache/state/digested_chunk_map.rs
+++ b/storage/src/cache/state/digested_chunk_map.rs
@@ -7,7 +7,7 @@
 //!
 //! This module provides a chunk state tracking driver for legacy Rafs images without chunk array,
 //! which uses chunk digest as id to track chunk readiness state. The [DigestedChunkMap] is not
-//! optimal in case of performance and memory consumption. So it is only used only to keep backward
+//! optimal in case of performance and memory consumption. So it is only used to keep backward
 /// compatibility with the old nydus image format.
 use std::collections::HashSet;
 use std::io::Result;
diff --git a/storage/src/cache/state/mod.rs b/storage/src/cache/state/mod.rs
index e02559de93e..8cdac7574f9 100644
--- a/storage/src/cache/state/mod.rs
+++ b/storage/src/cache/state/mod.rs
@@ -74,7 +74,7 @@ pub trait ChunkMap: Any + Send + Sync {
     ///
     /// The function returns:
     /// - `Err(Timeout)` waiting for inflight backend IO timeouts.
-    /// - `Ok(true)` if the the chunk is ready.
+    /// - `Ok(true)` if the chunk is ready.
     /// - `Ok(false)` marks the chunk as pending, either set_ready_and_clear_pending() or
     ///   clear_pending() must be called to clear the pending state.
     fn check_ready_and_mark_pending(&self, _chunk: &dyn BlobChunkInfo) -> StorageResult {
diff --git a/storage/src/device.rs b/storage/src/device.rs
index 0800f1c59c3..cdefa5f77b8 100644
--- a/storage/src/device.rs
+++ b/storage/src/device.rs
@@ -787,7 +787,7 @@ pub struct BlobIoVec {
     bi_blob: Arc,
     /// Total size of blob IOs to be performed.
     bi_size: u64,
-    /// Array of blob IOs, these IOs should executed sequentially.
+    /// Array of blob IOs, these IOs should be executed sequentially.
     pub(crate) bi_vec: Vec,
 }
 
@@ -829,7 +829,7 @@ impl BlobIoVec {
         self.bi_vec.len()
     }
 
-    /// Check whether there's 'BlobIoDesc' in the'BlobIoVec'.
+    /// Check whether there's 'BlobIoDesc' in the 'BlobIoVec'.
     pub fn is_empty(&self) -> bool {
         self.bi_vec.is_empty()
     }
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index aae6deaea01..1795c176e1f 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -9,7 +9,7 @@
 //! storage containing data chunks. Data chunks may be compressed, encrypted and deduplicated by
 //! content digest value. When Rafs file is used for container images, Rafs metadata blob contains
 //! all filesystem metadatas, such as directory, file name, permission etc. Actually file contents
-//! are split into chunks and stored into data blobs. Rafs may built one data blob for each
+//! are split into chunks and stored into data blobs. Rafs may build one data blob for each
 //! container image layer or build a single data blob for the whole image, according to building
 //! options.
 //!
diff --git a/storage/src/meta/batch.rs b/storage/src/meta/batch.rs
index 4f3e40ffe43..828849a9291 100644
--- a/storage/src/meta/batch.rs
+++ b/storage/src/meta/batch.rs
@@ -75,7 +75,7 @@ impl BatchContextGenerator {
         self.chunk_data_buf.is_empty()
     }
 
-    /// Get the lenth of chunk data buffer.
+    /// Get the length of chunk data buffer.
     pub fn chunk_data_buf_len(&self) -> usize {
         self.chunk_data_buf.len()
     }
diff --git a/storage/src/meta/mod.rs b/storage/src/meta/mod.rs
index 77bea8f1053..ef892b56b97 100644
--- a/storage/src/meta/mod.rs
+++ b/storage/src/meta/mod.rs
@@ -82,7 +82,7 @@ const BLOB_TOC_FILE_SUFFIX: &str = "blob.toc";
 /// and can be used as marker to locate the compression context table. All fields of compression
 /// context table header should be encoded in little-endian format.
 ///
-/// The compression context table and header are arranged in the data blob as follow:
+/// The compression context table and header are arranged in the data blob as follows:
 ///
 /// `chunk data` | `compression context table` | `[ZRan context table | ZRan dictionary]` | `compression context table header`
 #[repr(C)]
@@ -705,7 +705,7 @@ impl BlobCompressionContextInfo {
     }
 
     /// Get compressed size associated with the chunk at `chunk_index`.
-    /// Capabale of handling both batch and non-batch chunks.
+    /// Capable of handling both batch and non-batch chunks.
     pub fn get_compressed_size(&self, chunk_index: u32) -> Result {
         self.state.get_compressed_size(chunk_index as usize)
     }
@@ -1012,7 +1012,7 @@ impl BlobCompressionContext {
     }
 
     /// Get compressed size associated with the chunk at `chunk_index`.
-    /// Capabale of handling both batch and non-batch chunks.
+    /// Capable of handling both batch and non-batch chunks.
     pub fn get_compressed_size(&self, chunk_index: usize) -> Result {
         if self.is_batch_chunk(chunk_index) {
             let ctx = self
@@ -1379,7 +1379,7 @@ impl BlobMetaChunkArray {
             // - `mid < size`: `mid` is limited by `[left; right)` bound.
             let entry = &chunks[mid];
             if compressed {
-                // Capabale of handling both batch and non-batch chunks.
+                // Capable of handling both batch and non-batch chunks.
                 let c_offset = entry.compressed_offset();
                 let c_size = state.get_compressed_size(mid)?;
                 (start, end) = (c_offset, c_offset + c_size as u64);
diff --git a/storage/src/utils.rs b/storage/src/utils.rs
index ba040971b5c..726ad921cf4 100644
--- a/storage/src/utils.rs
+++ b/storage/src/utils.rs
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
-//! Utility helpers to supprt the storage subsystem.
+//! Utility helpers to support the storage subsystem.
 use std::alloc::{alloc, Layout};
 use std::cmp::{self, min};
 use std::io::{ErrorKind, IoSliceMut, Result};
diff --git a/utils/src/compress/lz4_standard.rs b/utils/src/compress/lz4_standard.rs
index 723962f22a3..bd4e51d940f 100644
--- a/utils/src/compress/lz4_standard.rs
+++ b/utils/src/compress/lz4_standard.rs
@@ -66,8 +66,8 @@ mod tests {
     #[test]
     fn test_error_input() {
         let mut big_buf = vec![0x0u8; u32::MAX as usize];
-        let mock_comperessed = vec![0x0u8; 32];
+        let mock_compressed = vec![0x0u8; 32];
         assert!(lz4_compress(&big_buf).is_err());
-        assert!(lz4_decompress(&mock_comperessed, big_buf.as_mut_slice()).is_err());
+        assert!(lz4_decompress(&mock_compressed, big_buf.as_mut_slice()).is_err());
     }
 }
diff --git a/utils/src/crypt.rs b/utils/src/crypt.rs
index a5646ce61d3..f37b40670fe 100644
--- a/utils/src/crypt.rs
+++ b/utils/src/crypt.rs
@@ -233,7 +233,7 @@ impl Cipher {
                 .map_err(|e| eother!(format!("failed to encrypt data, {}", e)))
             }
             Cipher::Aes256Gcm(_cipher) => {
-                Err(einval!("Cipher::entrypt() doesn't support Aes256Gcm"))
+                Err(einval!("Cipher::encrypt() doesn't support Aes256Gcm"))
             }
         }
     }
@@ -247,7 +247,7 @@ impl Cipher {
             Cipher::Aes256Xts(cipher) => Self::cipher(*cipher, symm::Mode::Decrypt, key, iv, data)
                 .map_err(|e| eother!(format!("failed to decrypt data, {}", e))),
             Cipher::Aes256Gcm(_cipher) => {
-                Err(einval!("Cipher::detrypt() doesn't support Aes256Gcm"))
+                Err(einval!("Cipher::decrypt() doesn't support Aes256Gcm"))
             }
         }?;
 
@@ -751,7 +751,7 @@ mod tests {
             CipherContext::new(error_key.to_vec(), iv.to_vec(), true, Algorithm::Aes128Xts)
                 .is_err()
         );
-        // create wtih symmetry key
+        // create with symmetry key
         assert!(CipherContext::new(
             symmetry_key.to_vec(),
             iv.to_vec(),
diff --git a/utils/src/digest.rs b/utils/src/digest.rs
index 99d28d0ffed..26a6997a530 100644
--- a/utils/src/digest.rs
+++ b/utils/src/digest.rs
@@ -88,7 +88,7 @@ pub trait DigestHasher {
 /// So we should avoid any unnecessary clone() operation. Add we prefer allocation on stack
 /// instead of allocation on heap.
 ///
-/// If allocating memory for blake3::Hahser is preferred over using the stack, please try:
+/// If allocating memory for blake3::Hasher is preferred over using the stack, please try:
 /// Blake3(Box). But be careful, this will cause one extra memory allocation/free
 /// for each digest.
 #[derive(Clone, Debug)]
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index 4785e36a9f4..9a1c4d1f901 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -166,7 +166,7 @@ mod tests {
     }
 
     #[test]
-    fn test_round_up_uszie() {
+    fn test_round_up_usize() {
         assert_eq!(round_up_usize(10, 8), 16);
         assert_eq!(round_up_usize(100, 8), 104);
         assert_eq!(round_up_usize(1000, 8), 1000);