chore: Make rust 1.80 clippy happy (#4927)
Signed-off-by: Xuanwo <github@xuanwo.io>
Xuanwo committed Jul 26, 2024
1 parent f859bce commit 8f96fa0
Showing 11 changed files with 25 additions and 27 deletions.
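Most of the Rust changes in this commit adjust doc comments: wrapped lines inside a bulleted list item get indented, and wrapped lines inside a blockquote repeat the `>` marker. A minimal, hypothetical sketch of the style the Rust 1.80 clippy expects (the commit title only says it makes the new clippy happy; the lint involved is presumably the new doc lazy-continuation check):

```rust
/// A standalone illustration, not taken from this commit, of the doc-comment
/// style the diffs below converge on.
///
/// - the total size of data is known, but the size of the current write
///   is less than the total size (the wrapped line is indented, so rustdoc
///   keeps it inside the bullet instead of treating it as a lazy
///   continuation of the list item).
///
/// > A blockquote that wraps onto a second line repeats the `>` marker,
/// > which is the fix applied in `core/src/layers/blocking.rs` below.
pub fn doc_style_example() {}

fn main() {
    doc_style_example();
}
```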
16 changes: 8 additions & 8 deletions .github/workflows/docs.yml
@@ -22,7 +22,7 @@ on:
branches:
- main
tags:
- 'v*'
- "v*"
pull_request:
branches:
- main
@@ -40,8 +40,8 @@ concurrency:
env:
# This same toolchain for rust 1.79.0 but nightly so we can use new features.
RUST_DOC_TOOLCHAIN: nightly-2024-06-13
# Enable cfg docs to make sure docs are built.
RUSTDOCFLAGS: "--cfg docs"
# Enable cfg docsrs to make sure docs are built.
RUSTDOCFLAGS: "--cfg docsrs"

jobs:
build-rust-doc:
@@ -64,7 +64,7 @@ jobs:
- uses: actions/setup-java@v4
with:
distribution: temurin
java-version: '17'
java-version: "17"

- name: Build OpenDAL doc
working-directory: core
@@ -87,7 +87,7 @@ jobs:
- uses: actions/setup-java@v4
with:
distribution: temurin
java-version: '17'
java-version: "17"

- name: Build and test
working-directory: bindings/java
@@ -111,7 +111,7 @@ jobs:

- uses: actions/setup-node@v4
with:
node-version: '18'
node-version: "18"
cache: pnpm
cache-dependency-path: "bindings/nodejs/pnpm-lock.yaml"
- name: Corepack
@@ -144,7 +144,7 @@ jobs:

- uses: actions/setup-python@v5
with:
python-version: '3.11'
python-version: "3.11"
- name: Setup Rust toolchain
uses: ./.github/actions/setup

@@ -435,7 +435,7 @@ jobs:

- uses: actions/setup-node@v4
with:
node-version: '18'
node-version: "18"
cache: pnpm
cache-dependency-path: "website/pnpm-lock.yaml"

1 change: 0 additions & 1 deletion core/Cargo.toml
@@ -32,7 +32,6 @@ version = "0.47.3"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docs"]

[workspace]
default-members = ["."]
2 changes: 1 addition & 1 deletion core/src/layers/blocking.rs
@@ -95,7 +95,7 @@ use crate::*;
/// In a pure blocking context, we can create a runtime and use it to create the `BlockingLayer`.
///
/// > The following code uses a global statically created runtime as an example, please manage the
/// runtime on demand.
/// > runtime on demand.
///
/// ```rust,no_run
/// use once_cell::sync::Lazy;
4 changes: 2 additions & 2 deletions core/src/lib.rs
@@ -18,7 +18,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/apache/opendal/main/website/static/img/logo.svg"
)]
#![cfg_attr(docs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

//! Apache OpenDAL™ is a data access layer that allows users to easily and
//! efficiently retrieve data from various storage services in a unified way.
@@ -132,7 +132,7 @@ mod types;
pub use types::*;

// Public modules, they will be accessed like `opendal::layers::Xxxx`
#[cfg(docs)]
#[cfg(docsrs)]
pub mod docs;
pub mod layers;
pub mod raw;
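The `docs` to `docsrs` rename above, together with dropping the custom `rustdoc-args` from `core/Cargo.toml`, suggests the crate now relies on the `--cfg docsrs` flag that docs.rs (and the updated `RUSTDOCFLAGS` in `docs.yml`) supply. A hypothetical, self-contained crate root showing how that cfg is commonly wired up; the module names are illustrative, not OpenDAL's:

```rust
// The unstable `doc_auto_cfg` feature is only requested when the `docsrs`
// cfg is set, so regular builds stay on stable Rust.
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

// A documentation-only module that exists solely in the docs build,
// mirroring the `#[cfg(docsrs)] pub mod docs;` pattern in the diff above.
#[cfg(docsrs)]
pub mod docs {
    //! Extra material rendered only in the documentation build.
}

// Regular modules are unaffected by the cfg.
pub mod layers {}
pub mod raw {}

fn main() {}
```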
2 changes: 1 addition & 1 deletion core/src/raw/oio/write/multipart_write.rs
@@ -69,7 +69,7 @@ pub trait MultipartWrite: Send + Sync + Unpin + 'static {
///
/// - the total size of data is unknown.
/// - the total size of data is known, but the size of current write
/// is less then the total size.
/// is less then the total size.
fn initiate_part(&self) -> impl Future<Output = Result<String>> + MaybeSend;

/// write_part will write a part of the data and returns the result
2 changes: 1 addition & 1 deletion core/src/services/fs/backend.rs
@@ -73,7 +73,7 @@ impl FsBuilder {
/// # Notes
///
/// - When append is enabled, we will not use atomic write
/// to avoid data loss and performance issue.
/// to avoid data loss and performance issue.
pub fn atomic_write_dir(mut self, dir: &str) -> Self {
if !dir.is_empty() {
self.config.atomic_write_dir = Some(dir.to_string());
2 changes: 1 addition & 1 deletion core/src/services/gdrive/builder.rs
@@ -104,7 +104,7 @@ impl GdriveBuilder {
///
/// - An access token is valid for 1 hour.
/// - If you want to use the access token for a long time,
/// you can use the refresh token to get a new access token.
/// you can use the refresh token to get a new access token.
pub fn access_token(mut self, access_token: &str) -> Self {
self.config.access_token = Some(access_token.to_string());
self
2 changes: 1 addition & 1 deletion core/src/services/github/core.rs
@@ -169,7 +169,7 @@ impl GithubCore {

let mut req_body = CreateOrUpdateContentsRequest {
message: format!("Write {} at {} via opendal", path, chrono::Local::now()),
content: base64::engine::general_purpose::STANDARD.encode(&bs.to_bytes()),
content: base64::engine::general_purpose::STANDARD.encode(bs.to_bytes()),
sha: None,
};
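The one non-doc change above drops a redundant `&` in front of `bs.to_bytes()`; `Engine::encode` in the `base64` crate accepts any `impl AsRef<[u8]>`, so the borrow adds nothing. A small hypothetical sketch of the same pattern, using `bytes::Bytes` directly since `bs.to_bytes()` here is OpenDAL-internal:

```rust
use base64::Engine;
use bytes::Bytes;

fn main() {
    let bs = Bytes::from_static(b"hello opendal");

    // Flagged style: borrowing the temporary returned by `slice(..)` before
    // handing it to a generic `impl AsRef<[u8]>` parameter.
    let old = base64::engine::general_purpose::STANDARD.encode(&bs.slice(..));

    // Preferred style, matching the change in this commit: pass the value.
    let new = base64::engine::general_purpose::STANDARD.encode(bs.slice(..));

    assert_eq!(old, new);
    println!("{new}");
}
```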

2 changes: 1 addition & 1 deletion core/src/services/hdfs/backend.rs
@@ -151,7 +151,7 @@ impl HdfsBuilder {
/// # Notes
///
/// - When append is enabled, we will not use atomic write
/// to avoid data loss and performance issue.
/// to avoid data loss and performance issue.
pub fn atomic_write_dir(mut self, dir: &str) -> Self {
self.config.atomic_write_dir = if dir.is_empty() {
None
17 changes: 8 additions & 9 deletions core/src/services/s3/backend.rs
@@ -147,13 +147,13 @@ pub struct S3Config {
/// server_side_encryption_aws_kms_key_id for this backend
///
/// - If `server_side_encryption` set to `aws:kms`, and `server_side_encryption_aws_kms_key_id`
/// is not set, S3 will use aws managed kms key to encrypt data.
/// is not set, S3 will use aws managed kms key to encrypt data.
/// - If `server_side_encryption` set to `aws:kms`, and `server_side_encryption_aws_kms_key_id`
/// is a valid kms key id, S3 will use the provided kms key to encrypt data.
/// is a valid kms key id, S3 will use the provided kms key to encrypt data.
/// - If the `server_side_encryption_aws_kms_key_id` is invalid or not found, an error will be
/// returned.
/// returned.
/// - If `server_side_encryption` is not `aws:kms`, setting `server_side_encryption_aws_kms_key_id`
/// is a noop.
/// is a noop.
pub server_side_encryption_aws_kms_key_id: Option<String>,
/// server_side_encryption_customer_algorithm for this backend.
///
@@ -398,13 +398,12 @@ impl S3Builder {
/// Set server_side_encryption_aws_kms_key_id for this backend
///
/// - If `server_side_encryption` set to `aws:kms`, and `server_side_encryption_aws_kms_key_id`
/// is not set, S3 will use aws managed kms key to encrypt data.
/// is not set, S3 will use aws managed kms key to encrypt data.
/// - If `server_side_encryption` set to `aws:kms`, and `server_side_encryption_aws_kms_key_id`
/// is a valid kms key id, S3 will use the provided kms key to encrypt data.
/// is a valid kms key id, S3 will use the provided kms key to encrypt data.
/// - If the `server_side_encryption_aws_kms_key_id` is invalid or not found, an error will be
/// returned.
/// - If `server_side_encryption` is not `aws:kms`, setting `server_side_encryption_aws_kms_key_id`
/// is a noop.
/// returned.
/// - If `server_side_encryption` is not `aws:kms`, setting `server_side_encryption_aws_kms_key_id` is a noop.
///
/// # Note
///
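The hunk above only reflows the doc comment on the `server_side_encryption_aws_kms_key_id` setter; the behaviour it describes is unchanged. A hypothetical usage sketch of the setters documented here (the builder method names come from this file; the bucket, region, and key alias values are made up):

```rust
use opendal::services::S3;
use opendal::Operator;

fn main() -> opendal::Result<()> {
    // SSE-KMS with an explicit key id. Per the docs above, omitting the key
    // id while `server_side_encryption` is `aws:kms` falls back to the
    // AWS-managed KMS key, and setting a key id without `aws:kms` is a noop.
    let builder = S3::default()
        .bucket("example-bucket")
        .region("us-east-1")
        .server_side_encryption("aws:kms")
        .server_side_encryption_aws_kms_key_id("alias/example-key");

    let _op = Operator::new(builder)?.finish();
    Ok(())
}
```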
2 changes: 1 addition & 1 deletion core/src/types/read/buffer_stream.rs
@@ -132,7 +132,7 @@ impl oio::Read for ChunkedReader {
/// The underlying reader is either a StreamingReader or a ChunkedReader.
///
/// - If chunk is None, BufferStream will use StreamingReader to iterate
/// data in streaming way.
/// data in streaming way.
/// - Otherwise, BufferStream will use ChunkedReader to read data in chunks.
pub struct BufferStream {
state: State,