feat: Exit while cache is not configured correctly #1515

Merged 9 commits on Dec 30, 2022 (changes shown from 4 commits).
164 changes: 81 additions & 83 deletions src/cache/cache.rs
@@ -25,7 +25,16 @@ use crate::cache::memcached::MemcachedCache;
use crate::cache::redis::RedisCache;
#[cfg(feature = "s3")]
use crate::cache::s3::S3Cache;
use crate::config::{self, CacheType, Config};
use crate::config::Config;
#[cfg(any(
feature = "azure",
feature = "gcs",
feature = "gha",
feature = "memcached",
feature = "redis",
feature = "s3"
))]
use crate::config::{self, CacheType};
use std::fmt;
use std::fs;
use std::io::{self, Cursor, Read, Seek, Write};
@@ -326,7 +335,7 @@ pub trait Storage: Send + Sync {
}

/// Implement storage for operator.
#[cfg(any(feature = "s3", feature = "azure", feature = "gcs"))]
#[cfg(any(feature = "s3", feature = "azure", feature = "gcs", feature = "redis"))]
#[async_trait]
impl Storage for opendal::Operator {
async fn get(&self, key: &str) -> Result<Cache> {
@@ -373,30 +382,31 @@ impl Storage for opendal::Operator {
}

/// Normalize key `abcdef` into `a/b/c/abcdef`
#[allow(dead_code)]
Collaborator: Could it be removed altogether?

Collaborator (author): This function will be used when features like s3 or azure are enabled. Adding #[allow(dead_code)] is the simplest way to address clippy errors under --no-default-features.

fn normalize_key(key: &str) -> String {
format!("{}/{}/{}/{}", &key[0..1], &key[1..2], &key[2..3], &key)
}
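
For reference, a minimal check of the normalization described in the doc comment above (a hypothetical test, not part of this diff):

#[test]
fn normalize_key_splits_prefix() {
    // "abcdef" becomes "a/b/c/abcdef".
    assert_eq!(normalize_key("abcdef"), "a/b/c/abcdef");
}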

/// Get a suitable `Storage` implementation from configuration.
#[allow(clippy::cognitive_complexity)] // TODO simplify!
pub fn storage_from_config(config: &Config, pool: &tokio::runtime::Handle) -> Arc<dyn Storage> {
for cache_type in config.caches.iter() {
match *cache_type {
pub fn storage_from_config(
config: &Config,
pool: &tokio::runtime::Handle,
) -> Result<Arc<dyn Storage>> {
if let Some(cache_type) = &config.cache {
match cache_type {
#[cfg(feature = "azure")]
CacheType::Azure(config::AzureCacheConfig {
ref connection_string,
ref container,
ref key_prefix,
}) => {
debug!("Trying Azure Blob Store account({})", key_prefix);
#[cfg(feature = "azure")]
match AzureBlobCache::build(connection_string, container, key_prefix) {
Ok(storage) => {
trace!("Using AzureBlobCache");
return Arc::new(storage);
}
Err(e) => warn!("Failed to create Azure cache: {:?}", e),
}
debug!("Init azure cache with container {container}, key_prefix {key_prefix}");
let storage = AzureBlobCache::build(connection_string, container, key_prefix)
.map_err(|err| anyhow!("create azure cache failed: {err:?}"))?;
return Ok(Arc::new(storage));
}
#[cfg(feature = "gcs")]
CacheType::GCS(config::GCSCacheConfig {
ref bucket,
ref key_prefix,
@@ -405,99 +415,87 @@ pub fn storage_from_config(config: &Config, pool: &tokio::runtime::Handle) -> Arc<dyn Storage> {
ref service_account,
ref credential_url,
}) => {
debug!(
"Trying GCS bucket({}, {}, {:?})",
bucket, key_prefix, rw_mode
);
#[cfg(feature = "gcs")]
{
let gcs_read_write_mode = match rw_mode {
config::GCSCacheRWMode::ReadOnly => RWMode::ReadOnly,
config::GCSCacheRWMode::ReadWrite => RWMode::ReadWrite,
};

match GCSCache::build(
bucket,
key_prefix,
cred_path.as_deref(),
service_account.as_deref(),
gcs_read_write_mode,
credential_url.as_deref(),
) {
Ok(s) => {
trace!("Using GCSCache");
return Arc::new(s);
}
Err(e) => warn!("Failed to create GCS Cache: {:?}", e),
}
}
debug!("Init gcs cache with bucket {bucket}, key_prefix {key_prefix}");

let gcs_read_write_mode = match rw_mode {
config::GCSCacheRWMode::ReadOnly => RWMode::ReadOnly,
config::GCSCacheRWMode::ReadWrite => RWMode::ReadWrite,
};

let storage = GCSCache::build(
bucket,
key_prefix,
cred_path.as_deref(),
service_account.as_deref(),
gcs_read_write_mode,
credential_url.as_deref(),
)
.map_err(|err| anyhow!("create gcs cache failed: {err:?}"))?;

return Ok(Arc::new(storage));
}
#[cfg(feature = "gha")]
CacheType::GHA(config::GHACacheConfig {
ref url,
ref token,
ref cache_to,
ref cache_from,
}) => {
debug!(
"Trying GHA Cache ({url}, {}***, {cache_to:?}, {cache_from:?})",
&token[..usize::min(3, token.len())]
);
#[cfg(feature = "gha")]
match GHACache::new(url, token, cache_to.clone(), cache_from.clone()) {
Ok(s) => {
trace!("Using GHA Cache: {}", url);
return Arc::new(s);
}
Err(e) => warn!("Failed to create GHA Cache: {:?}", e),
}
debug!("Init gha cache with url {url}");

let storage = GHACache::new(url, token, cache_to.clone(), cache_from.clone())
.map_err(|err| anyhow!("create gha cache failed: {err:?}"))?;
return Ok(Arc::new(storage));
}
#[cfg(feature = "memcached")]
CacheType::Memcached(config::MemcachedCacheConfig { ref url }) => {
debug!("Trying Memcached({})", url);
#[cfg(feature = "memcached")]
match MemcachedCache::new(url, pool) {
Ok(s) => {
trace!("Using Memcached: {}", url);
return Arc::new(s);
}
Err(e) => warn!("Failed to create MemcachedCache: {:?}", e),
}
debug!("Init memcached cache with url {url}");

let storage = MemcachedCache::new(url, pool)
.map_err(|err| anyhow!("create memcached cache failed: {err:?}"))?;
return Ok(Arc::new(storage));
}
#[cfg(feature = "redis")]
CacheType::Redis(config::RedisCacheConfig { ref url }) => {
debug!("Trying Redis({})", url);
#[cfg(feature = "redis")]
match RedisCache::build(url) {
Ok(s) => {
trace!("Using Redis: {}", url);
return Arc::new(s);
}
Err(e) => warn!("Failed to create RedisCache: {:?}", e),
}
debug!("Init redis cache with url {url}");
let storage = RedisCache::build(url)
.map_err(|err| anyhow!("create redis cache failed: {err:?}"))?;
return Ok(Arc::new(storage));
}
#[cfg(feature = "s3")]
CacheType::S3(ref c) => {
debug!("Trying S3Cache({:?})", c);
#[cfg(feature = "s3")]
match S3Cache::build(
debug!(
"Init s3 cache with bucket {}, endpoint {:?}",
c.bucket, c.endpoint
);

let storage = S3Cache::build(
&c.bucket,
c.region.as_deref(),
&c.key_prefix,
c.no_credentials,
c.endpoint.as_deref(),
c.use_ssl,
) {
Ok(s) => {
trace!("Using S3Cache");
return Arc::new(s);
}
Err(e) => warn!("Failed to create S3Cache: {:?}", e),
}
)
.map_err(|err| anyhow!("create s3 cache failed: {err:?}"))?;

return Ok(Arc::new(storage));
}
#[cfg(not(any(
feature = "azure",
feature = "gcs",
feature = "gha",
feature = "memcached",
feature = "redis",
feature = "s3"
)))]
_ => return Err(anyhow!("cache type is not enabled")),
}
}

info!("No configured caches successful, falling back to default");
let (dir, size) = (&config.fallback_cache.dir, config.fallback_cache.size);
trace!("Using DiskCache({:?}, {})", dir, size);
Arc::new(DiskCache::new(&dir, size, pool))
debug!("Init disk cache with dir {:?}, size {}", dir, size);
Ok(Arc::new(DiskCache::new(&dir, size, pool)))
}

#[cfg(test)]
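With the new Result-returning signature above, a misconfigured cache now surfaces an error at startup instead of being silently skipped. A minimal sketch of a caller (hypothetical, not from this diff; the actual wiring is the one-line change in src/server.rs below):

let storage = match storage_from_config(&config, &pool) {
    Ok(storage) => storage,
    Err(e) => {
        // Exit instead of silently falling back to the disk cache.
        eprintln!("cache is not configured correctly: {e:?}");
        std::process::exit(1);
    }
};
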
43 changes: 16 additions & 27 deletions src/config.rs
@@ -244,9 +244,9 @@ pub struct CacheConfigs {
}

impl CacheConfigs {
/// Return a vec of the available cache types in an arbitrary but
/// Return cache type in an arbitrary but
/// consistent ordering
fn into_vec_and_fallback(self) -> (Vec<CacheType>, DiskCacheConfig) {
fn into_fallback(self) -> (Option<CacheType>, DiskCacheConfig) {
let CacheConfigs {
azure,
disk,
@@ -257,18 +257,17 @@ impl CacheConfigs {
s3,
} = self;

let caches = s3
let cache_type = s3
.map(CacheType::S3)
.into_iter()
.chain(redis.map(CacheType::Redis))
.chain(memcached.map(CacheType::Memcached))
.chain(gcs.map(CacheType::GCS))
.chain(gha.map(CacheType::GHA))
.chain(azure.map(CacheType::Azure))
.collect();
.or_else(|| redis.map(CacheType::Redis))
.or_else(|| memcached.map(CacheType::Memcached))
.or_else(|| gcs.map(CacheType::GCS))
.or_else(|| gha.map(CacheType::GHA))
.or_else(|| azure.map(CacheType::Azure));

let fallback = disk.unwrap_or_default();

(caches, fallback)
(cache_type, fallback)
}

/// Override self with any existing fields from other
@@ -666,7 +665,7 @@ fn config_file(env_var: &str, leaf: &str) -> PathBuf {

#[derive(Debug, Default, PartialEq, Eq)]
pub struct Config {
pub caches: Vec<CacheType>,
pub cache: Option<CacheType>,
Collaborator: Was this used anywhere before? Did multiple backends ever work?

Collaborator (author): No, they never worked at the same time.

The only case is when users configure several backends at once and some of them are wrong. For example, users configure s3, azure, and redis at the same time; s3 and azure are invalid, and only redis works.

In the old logic, the errors returned by s3 and azure were ignored, and redis was used in the end. In this PR, we only choose the first configured cache type and return any error it produces, which makes the behavior more predictable.

In the future, I will return an error if users configure several backends at the same time.
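
An illustrative sketch of the selection change (standalone snippet, not from the diff): the old into_vec_and_fallback collected every configured backend with chain/collect and tried each in turn, while the new into_fallback takes only the first configured backend via or_else, so its build error propagates.

// First configured backend wins; later options are never consulted.
let s3: Option<&str> = None;
let redis: Option<&str> = Some("redis://127.0.0.1");
let azure: Option<&str> = Some("azure-connection-string");

let chosen = s3.or_else(|| redis).or_else(|| azure);
assert_eq!(chosen, Some("redis://127.0.0.1"));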

pub fallback_cache: DiskCacheConfig,
pub dist: DistConfig,
pub server_startup_timeout: Option<std::time::Duration>,
@@ -700,9 +699,9 @@ impl Config {
let EnvConfig { cache } = env_conf;
conf_caches.merge(cache);

let (caches, fallback_cache) = conf_caches.into_vec_and_fallback();
let (caches, fallback_cache) = conf_caches.into_fallback();
Self {
caches,
cache: caches,
fallback_cache,
dist,
server_startup_timeout,
@@ -990,19 +989,9 @@ fn config_overrides() {
assert_eq!(
Config::from_env_and_file_configs(env_conf, file_conf),
Config {
caches: vec![
CacheType::Redis(RedisCacheConfig {
url: "myotherredisurl".to_owned()
}),
CacheType::Memcached(MemcachedCacheConfig {
url: "memurl".to_owned()
}),
CacheType::Azure(AzureCacheConfig {
connection_string: String::new(),
container: String::new(),
key_prefix: String::new()
}),
],
cache: Some(CacheType::Redis(RedisCacheConfig {
url: "myotherredisurl".to_owned()
}),),
fallback_cache: DiskCacheConfig {
dir: "/env-cache".into(),
size: 5,
2 changes: 1 addition & 1 deletion src/server.rs
@@ -399,7 +399,7 @@ pub fn start_server(config: &Config, port: u16) -> Result<()> {
.build()?;
let pool = runtime.handle().clone();
let dist_client = DistClientContainer::new(config, &pool);
let storage = storage_from_config(config, &pool);
let storage = storage_from_config(config, &pool)?;
let res =
SccacheServer::<ProcessCommandCreator>::new(port, runtime, client, dist_client, storage);
let notify = env::var_os("SCCACHE_STARTUP_NOTIFY");
15 changes: 1 addition & 14 deletions tests/sccache_cargo.rs
@@ -302,24 +302,11 @@ fn test_gcp_arg_check() -> Result<()> {
let mut cmd = Command::new(SCCACHE_BIN.as_os_str());
cmd.arg("--start-server")
.env("SCCACHE_LOG", "debug")
.env("SCCACHE_GCS_OAUTH_URL", "http://foobar");
.env("SCCACHE_GCS_OAUTH_URL", "http://127.0.0.1");

cmd.assert().failure().stderr(predicate::str::contains(
"If setting GCS credentials, SCCACHE_GCS_BUCKET",
));

stop_sccache()?;
let mut cmd = Command::new(SCCACHE_BIN.as_os_str());
cmd.arg("--start-server")
.env("SCCACHE_LOG", "debug")
.env("SCCACHE_GCS_BUCKET", "b")
.env("SCCACHE_GCS_OAUTH_URL", "http://foobar")
.env("SCCACHE_GCS_KEY_PATH", "foo.json");

// This is just a warning
cmd.assert().success().stderr(predicate::str::contains(
"SCCACHE_GCS_OAUTH_URL has been deprecated",
));

Ok(())
}