diff --git a/src/cache/cache.rs b/src/cache/cache.rs
index 42213b720..2a8786e61 100644
--- a/src/cache/cache.rs
+++ b/src/cache/cache.rs
@@ -25,7 +25,16 @@ use crate::cache::memcached::MemcachedCache;
 use crate::cache::redis::RedisCache;
 #[cfg(feature = "s3")]
 use crate::cache::s3::S3Cache;
-use crate::config::{self, CacheType, Config};
+use crate::config::Config;
+#[cfg(any(
+    feature = "azure",
+    feature = "gcs",
+    feature = "gha",
+    feature = "memcached",
+    feature = "redis",
+    feature = "s3"
+))]
+use crate::config::{self, CacheType};
 use std::fmt;
 use std::fs;
 use std::io::{self, Cursor, Read, Seek, Write};
@@ -326,7 +335,7 @@ pub trait Storage: Send + Sync {
 }
 
 /// Implement storage for operator.
-#[cfg(any(feature = "s3", feature = "azure", feature = "gcs"))]
+#[cfg(any(feature = "s3", feature = "azure", feature = "gcs", feature = "redis"))]
 #[async_trait]
 impl Storage for opendal::Operator {
     async fn get(&self, key: &str) -> Result<Cache> {
@@ -373,30 +382,31 @@ impl Storage for opendal::Operator {
 }
 
 /// Normalize key `abcdef` into `a/b/c/abcdef`
+#[allow(dead_code)]
 fn normalize_key(key: &str) -> String {
     format!("{}/{}/{}/{}", &key[0..1], &key[1..2], &key[2..3], &key)
 }
 
 /// Get a suitable `Storage` implementation from configuration.
 #[allow(clippy::cognitive_complexity)] // TODO simplify!
-pub fn storage_from_config(config: &Config, pool: &tokio::runtime::Handle) -> Arc<dyn Storage> {
-    for cache_type in config.caches.iter() {
-        match *cache_type {
+pub fn storage_from_config(
+    config: &Config,
+    pool: &tokio::runtime::Handle,
+) -> Result<Arc<dyn Storage>> {
+    if let Some(cache_type) = &config.cache {
+        match cache_type {
+            #[cfg(feature = "azure")]
             CacheType::Azure(config::AzureCacheConfig {
                 ref connection_string,
                 ref container,
                 ref key_prefix,
             }) => {
-                debug!("Trying Azure Blob Store account({})", key_prefix);
-                #[cfg(feature = "azure")]
-                match AzureBlobCache::build(connection_string, container, key_prefix) {
-                    Ok(storage) => {
-                        trace!("Using AzureBlobCache");
-                        return Arc::new(storage);
-                    }
-                    Err(e) => warn!("Failed to create Azure cache: {:?}", e),
-                }
+                debug!("Init azure cache with container {container}, key_prefix {key_prefix}");
+                let storage = AzureBlobCache::build(connection_string, container, key_prefix)
+                    .map_err(|err| anyhow!("create azure cache failed: {err:?}"))?;
+                return Ok(Arc::new(storage));
             }
+            #[cfg(feature = "gcs")]
             CacheType::GCS(config::GCSCacheConfig {
                 ref bucket,
                 ref key_prefix,
@@ -405,99 +415,87 @@ pub fn storage_from_config(config: &Config, pool: &tokio::runtime::Handle) -> Arc<dyn Storage> {
                 ref service_account,
                 ref credential_url,
             }) => {
-                debug!(
-                    "Trying GCS bucket({}, {}, {:?})",
-                    bucket, key_prefix, rw_mode
-                );
-                #[cfg(feature = "gcs")]
-                {
-                    let gcs_read_write_mode = match rw_mode {
-                        config::GCSCacheRWMode::ReadOnly => RWMode::ReadOnly,
-                        config::GCSCacheRWMode::ReadWrite => RWMode::ReadWrite,
-                    };
-
-                    match GCSCache::build(
-                        bucket,
-                        key_prefix,
-                        cred_path.as_deref(),
-                        service_account.as_deref(),
-                        gcs_read_write_mode,
-                        credential_url.as_deref(),
-                    ) {
-                        Ok(s) => {
-                            trace!("Using GCSCache");
-                            return Arc::new(s);
-                        }
-                        Err(e) => warn!("Failed to create GCS Cache: {:?}", e),
-                    }
-                }
+                debug!("Init gcs cache with bucket {bucket}, key_prefix {key_prefix}");
+
+                let gcs_read_write_mode = match rw_mode {
+                    config::GCSCacheRWMode::ReadOnly => RWMode::ReadOnly,
+                    config::GCSCacheRWMode::ReadWrite => RWMode::ReadWrite,
+                };
+
+                let storage = GCSCache::build(
+                    bucket,
+                    key_prefix,
+                    cred_path.as_deref(),
+                    service_account.as_deref(),
+                    gcs_read_write_mode,
+                    credential_url.as_deref(),
+                )
+                .map_err(|err| anyhow!("create gcs cache failed: {err:?}"))?;
+
+                return Ok(Arc::new(storage));
             }
+            #[cfg(feature = "gha")]
             CacheType::GHA(config::GHACacheConfig {
                 ref url,
                 ref token,
                 ref cache_to,
                 ref cache_from,
             }) => {
-                debug!(
-                    "Trying GHA Cache ({url}, {}***, {cache_to:?}, {cache_from:?})",
-                    &token[..usize::min(3, token.len())]
-                );
-                #[cfg(feature = "gha")]
-                match GHACache::new(url, token, cache_to.clone(), cache_from.clone()) {
-                    Ok(s) => {
-                        trace!("Using GHA Cache: {}", url);
-                        return Arc::new(s);
-                    }
-                    Err(e) => warn!("Failed to create GHA Cache: {:?}", e),
-                }
+                debug!("Init gha cache with url {url}");
+
+                let storage = GHACache::new(url, token, cache_to.clone(), cache_from.clone())
+                    .map_err(|err| anyhow!("create gha cache failed: {err:?}"))?;
+                return Ok(Arc::new(storage));
             }
+            #[cfg(feature = "memcached")]
             CacheType::Memcached(config::MemcachedCacheConfig { ref url }) => {
-                debug!("Trying Memcached({})", url);
-                #[cfg(feature = "memcached")]
-                match MemcachedCache::new(url, pool) {
-                    Ok(s) => {
-                        trace!("Using Memcached: {}", url);
-                        return Arc::new(s);
-                    }
-                    Err(e) => warn!("Failed to create MemcachedCache: {:?}", e),
-                }
+                debug!("Init memcached cache with url {url}");
+
+                let storage = MemcachedCache::new(url, pool)
+                    .map_err(|err| anyhow!("create memcached cache failed: {err:?}"))?;
+                return Ok(Arc::new(storage));
             }
+            #[cfg(feature = "redis")]
             CacheType::Redis(config::RedisCacheConfig { ref url }) => {
-                debug!("Trying Redis({})", url);
-                #[cfg(feature = "redis")]
-                match RedisCache::build(url) {
-                    Ok(s) => {
-                        trace!("Using Redis: {}", url);
-                        return Arc::new(s);
-                    }
-                    Err(e) => warn!("Failed to create RedisCache: {:?}", e),
-                }
+                debug!("Init redis cache with url {url}");
+                let storage = RedisCache::build(url)
+                    .map_err(|err| anyhow!("create redis cache failed: {err:?}"))?;
+                return Ok(Arc::new(storage));
             }
+            #[cfg(feature = "s3")]
             CacheType::S3(ref c) => {
-                debug!("Trying S3Cache({:?})", c);
-                #[cfg(feature = "s3")]
-                match S3Cache::build(
+                debug!(
+                    "Init s3 cache with bucket {}, endpoint {:?}",
+                    c.bucket, c.endpoint
+                );
+
+                let storage = S3Cache::build(
                     &c.bucket,
                     c.region.as_deref(),
                     &c.key_prefix,
                     c.no_credentials,
                     c.endpoint.as_deref(),
                     c.use_ssl,
-                ) {
-                    Ok(s) => {
-                        trace!("Using S3Cache");
-                        return Arc::new(s);
-                    }
-                    Err(e) => warn!("Failed to create S3Cache: {:?}", e),
-                }
+                )
+                .map_err(|err| anyhow!("create s3 cache failed: {err:?}"))?;
+
+                return Ok(Arc::new(storage));
             }
+            #[cfg(not(any(
+                feature = "azure",
+                feature = "gcs",
+                feature = "gha",
+                feature = "memcached",
+                feature = "redis",
+                feature = "s3"
+            )))]
+            _ => bail!("cache type is not enabled"),
         }
     }
-    info!("No configured caches successful, falling back to default");
 
     let (dir, size) = (&config.fallback_cache.dir, config.fallback_cache.size);
-    trace!("Using DiskCache({:?}, {})", dir, size);
-    Arc::new(DiskCache::new(&dir, size, pool))
+    debug!("Init disk cache with dir {:?}, size {}", dir, size);
+    Ok(Arc::new(DiskCache::new(&dir, size, pool)))
 }
 
 #[cfg(test)]
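Note: with this refactor a misconfigured cache is a hard error; only an absent `cache` entry falls back to the disk cache. A minimal call-site sketch of the new contract, assuming a `config` and `pool` already in scope (these names are placeholders, not part of the patch):

    // Hypothetical caller: propagate a cache misconfiguration instead of
    // silently degrading to the disk cache, as the old signature did.
    let storage: Arc<dyn Storage> = storage_from_config(&config, &pool)?;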
diff --git a/src/cache/gcs.rs b/src/cache/gcs.rs
index c1fa3dc89..30324a820 100644
--- a/src/cache/gcs.rs
+++ b/src/cache/gcs.rs
@@ -17,6 +17,7 @@ use crate::errors::*;
 use opendal::Operator;
 use opendal::{layers::LoggingLayer, services::gcs};
 use reqsign::{GoogleBuilder, GoogleToken, GoogleTokenLoad};
+use url::Url;
 
 #[derive(Copy, Clone)]
 pub enum RWMode {
@@ -59,6 +60,8 @@ impl GCSCache {
             signer_builder.credential_path(path);
         }
         if let Some(cred_url) = credential_url {
+            let _ = Url::parse(cred_url)
+                .map_err(|err| anyhow!("gcs credential url is invalid: {err:?}"))?;
             signer_builder.customed_token_loader(TaskClusterTokenLoader {
                 client: reqwest::blocking::Client::default(),
                 scope: rw_mode.to_scope().to_string(),
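The parse result is deliberately discarded (`let _ =`): only well-formedness is checked here, before reqsign later fetches a token from the URL. For reference, a short illustration of what `url::Url::parse` accepts, using arbitrary example URLs:

    use url::Url;

    // An absolute URL with a scheme parses fine.
    assert!(Url::parse("http://127.0.0.1/token").is_ok());
    // No scheme at all fails with `ParseError::RelativeUrlWithoutBase`,
    // which is what the updated integration test below triggers.
    assert!(Url::parse("not_valid_url//127.0.0.1").is_err());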
anyhow!("gcs credential url is invalid: {err:?}"))?; signer_builder.customed_token_loader(TaskClusterTokenLoader { client: reqwest::blocking::Client::default(), scope: rw_mode.to_scope().to_string(), diff --git a/src/config.rs b/src/config.rs index 9073fac65..1ba495c0b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -244,9 +244,9 @@ pub struct CacheConfigs { } impl CacheConfigs { - /// Return a vec of the available cache types in an arbitrary but + /// Return cache type in an arbitrary but /// consistent ordering - fn into_vec_and_fallback(self) -> (Vec, DiskCacheConfig) { + fn into_fallback(self) -> (Option, DiskCacheConfig) { let CacheConfigs { azure, disk, @@ -257,18 +257,17 @@ impl CacheConfigs { s3, } = self; - let caches = s3 + let cache_type = s3 .map(CacheType::S3) - .into_iter() - .chain(redis.map(CacheType::Redis)) - .chain(memcached.map(CacheType::Memcached)) - .chain(gcs.map(CacheType::GCS)) - .chain(gha.map(CacheType::GHA)) - .chain(azure.map(CacheType::Azure)) - .collect(); + .or_else(|| redis.map(CacheType::Redis)) + .or_else(|| memcached.map(CacheType::Memcached)) + .or_else(|| gcs.map(CacheType::GCS)) + .or_else(|| gha.map(CacheType::GHA)) + .or_else(|| azure.map(CacheType::Azure)); + let fallback = disk.unwrap_or_default(); - (caches, fallback) + (cache_type, fallback) } /// Override self with any existing fields from other @@ -666,7 +665,7 @@ fn config_file(env_var: &str, leaf: &str) -> PathBuf { #[derive(Debug, Default, PartialEq, Eq)] pub struct Config { - pub caches: Vec, + pub cache: Option, pub fallback_cache: DiskCacheConfig, pub dist: DistConfig, pub server_startup_timeout: Option, @@ -700,9 +699,9 @@ impl Config { let EnvConfig { cache } = env_conf; conf_caches.merge(cache); - let (caches, fallback_cache) = conf_caches.into_vec_and_fallback(); + let (caches, fallback_cache) = conf_caches.into_fallback(); Self { - caches, + cache: caches, fallback_cache, dist, server_startup_timeout, @@ -990,19 +989,9 @@ fn config_overrides() { assert_eq!( Config::from_env_and_file_configs(env_conf, file_conf), Config { - caches: vec![ - CacheType::Redis(RedisCacheConfig { - url: "myotherredisurl".to_owned() - }), - CacheType::Memcached(MemcachedCacheConfig { - url: "memurl".to_owned() - }), - CacheType::Azure(AzureCacheConfig { - connection_string: String::new(), - container: String::new(), - key_prefix: String::new() - }), - ], + cache: Some(CacheType::Redis(RedisCacheConfig { + url: "myotherredisurl".to_owned() + }),), fallback_cache: DiskCacheConfig { dir: "/env-cache".into(), size: 5, diff --git a/src/server.rs b/src/server.rs index 939f44613..d27bfde02 100644 --- a/src/server.rs +++ b/src/server.rs @@ -399,10 +399,27 @@ pub fn start_server(config: &Config, port: u16) -> Result<()> { .build()?; let pool = runtime.handle().clone(); let dist_client = DistClientContainer::new(config, &pool); - let storage = storage_from_config(config, &pool); + + let notify = env::var_os("SCCACHE_STARTUP_NOTIFY"); + + let storage = match storage_from_config(config, &pool) { + Ok(storage) => storage, + Err(err) => { + error!("storage init failed for: {err:?}"); + + notify_server_startup( + ¬ify, + ServerStartup::Err { + reason: err.to_string(), + }, + )?; + + return Err(err); + } + }; + let res = SccacheServer::::new(port, runtime, client, dist_client, storage); - let notify = env::var_os("SCCACHE_STARTUP_NOTIFY"); match res { Ok(srv) => { let port = srv.port(); diff --git a/tests/sccache_cargo.rs b/tests/sccache_cargo.rs index 8f885b924..779e3f4ea 100644 --- a/tests/sccache_cargo.rs +++ 
diff --git a/src/server.rs b/src/server.rs
index 939f44613..d27bfde02 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -399,10 +399,27 @@ pub fn start_server(config: &Config, port: u16) -> Result<()> {
         .build()?;
     let pool = runtime.handle().clone();
     let dist_client = DistClientContainer::new(config, &pool);
-    let storage = storage_from_config(config, &pool);
+
+    let notify = env::var_os("SCCACHE_STARTUP_NOTIFY");
+
+    let storage = match storage_from_config(config, &pool) {
+        Ok(storage) => storage,
+        Err(err) => {
+            error!("storage init failed for: {err:?}");
+
+            notify_server_startup(
+                &notify,
+                ServerStartup::Err {
+                    reason: err.to_string(),
+                },
+            )?;
+
+            return Err(err);
+        }
+    };
+
     let res =
         SccacheServer::<ProcessCommandCreator>::new(port, runtime, client, dist_client, storage);
-    let notify = env::var_os("SCCACHE_STARTUP_NOTIFY");
     match res {
         Ok(srv) => {
             let port = srv.port();
diff --git a/tests/sccache_cargo.rs b/tests/sccache_cargo.rs
index 8f885b924..779e3f4ea 100644
--- a/tests/sccache_cargo.rs
+++ b/tests/sccache_cargo.rs
@@ -302,7 +302,7 @@ fn test_gcp_arg_check() -> Result<()> {
     let mut cmd = Command::new(SCCACHE_BIN.as_os_str());
     cmd.arg("--start-server")
         .env("SCCACHE_LOG", "debug")
-        .env("SCCACHE_GCS_OAUTH_URL", "http://foobar");
+        .env("SCCACHE_GCS_OAUTH_URL", "http://127.0.0.1");
 
     cmd.assert().failure().stderr(predicate::str::contains(
         "If setting GCS credentials, SCCACHE_GCS_BUCKET",
@@ -313,13 +313,13 @@ fn test_gcp_arg_check() -> Result<()> {
     cmd.arg("--start-server")
        .env("SCCACHE_LOG", "debug")
         .env("SCCACHE_GCS_BUCKET", "b")
-        .env("SCCACHE_GCS_OAUTH_URL", "http://foobar")
+        .env("SCCACHE_GCS_CREDENTIALS_URL", "not_valid_url//127.0.0.1")
         .env("SCCACHE_GCS_KEY_PATH", "foo.json");
 
     // This is just a warning
-    cmd.assert().success().stderr(predicate::str::contains(
-        "SCCACHE_GCS_OAUTH_URL has been deprecated",
-    ));
+    cmd.assert()
+        .failure()
+        .stderr(predicate::str::contains("gcs credential url is invalid"));
 
     Ok(())
 }
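The `SCCACHE_STARTUP_NOTIFY` lookup in `server.rs` moves above storage construction so that a bad cache config is reported back to the waiting client instead of letting it time out. The shape of that error path, as a generic sketch (this helper is illustrative, not part of the patch):

    // Hypothetical notify-then-propagate helper mirroring start_server:
    // report the failure to whoever awaits startup, then return the error.
    fn init_or_notify<T>(
        result: anyhow::Result<T>,
        notify: impl FnOnce(&str) -> anyhow::Result<()>,
    ) -> anyhow::Result<T> {
        match result {
            Ok(v) => Ok(v),
            Err(err) => {
                notify(&err.to_string())?;
                Err(err)
            }
        }
    }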