From 432262a46f0882d0a9699ff3607815330d669a03 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 3 Sep 2023 11:19:40 +0800 Subject: [PATCH] Remove the thread pool from `sync` caches - Remove `scheduled-thread-pool` crate. - Remove `thread_pool` and `unsafe_weak_pointers` modules. - Remove notification `DeliveryMode` as now `sync` caches only support the `Immediate` mode (like `future::Cache`). - Remove `sync::ConcurrentCacheExt`. - Rename `sync::ConcurrentCacheExt::sync` method to `sync::{Cache, SegmentedCache}::run_pending_tasks`. --- .cirrus.yml | 6 - .github/workflows/CI.yml | 12 - Cargo.toml | 12 +- examples/eviction_listener.rs | 4 +- src/cht.rs | 4 +- src/cht/segment.rs | 7 - src/common.rs | 2 +- src/common/builder_utils.rs | 12 - src/common/concurrent.rs | 6 - src/common/concurrent/constants.rs | 9 - src/common/concurrent/housekeeper.rs | 317 +--- src/common/concurrent/thread_pool.rs | 100 - src/common/concurrent/unsafe_weak_pointer.rs | 47 - src/future/cache.rs | 49 +- src/notification.rs | 110 -- src/notification/notifier.rs | 350 +--- src/sync/builder.rs | 71 +- src/sync/cache.rs | 1795 +++++++----------- src/sync/segment.rs | 917 ++++----- src/sync_base/base_cache.rs | 335 ++-- src/sync_base/invalidator.rs | 473 +++-- 21 files changed, 1520 insertions(+), 3118 deletions(-) delete mode 100644 src/common/concurrent/thread_pool.rs delete mode 100644 src/common/concurrent/unsafe_weak_pointer.rs diff --git a/.cirrus.yml b/.cirrus.yml index b830aa31..60294fd1 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -66,12 +66,6 @@ linux_arm64_task: # Run tests (release, sync feature) - cargo test -j 1 --release --features sync -- --test-threads=$NUM_CPUS - # Run tests (release, sync feature, thread-pool test for sync::Cache) - - cargo test --release --lib --features sync sync::cache::tests::enabling_and_disabling_thread_pools -- --exact --ignored - - # Run tests (release, sync feature, thread-pool test for sync::SegmentedCache) - - cargo test --release --lib 
--features sync sync::segment::tests::enabling_and_disabling_thread_pools -- --exact --ignored - # Run tests (sync feature, key lock test for notification) - cargo test --release --lib --features sync sync::cache::tests::test_key_lock_used_by_immediate_removal_notifications -- --exact --ignored diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 47522a69..264f1629 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -92,18 +92,6 @@ jobs: env: RUSTFLAGS: '--cfg rustver' - - name: Run tests (sync feature, thread-pool test for sync::Cache) - uses: actions-rs/cargo@v1 - with: - command: test - args: --release --lib --features sync sync::cache::tests::enabling_and_disabling_thread_pools -- --exact --ignored - - name: Run tests (sync feature, thread-pool test for sync::SegmentedCache) - uses: actions-rs/cargo@v1 - with: - command: test - args: --release --lib --features sync sync::segment::tests::enabling_and_disabling_thread_pools -- --exact --ignored - name: Run tests (sync feature, key lock test for notification) uses: actions-rs/cargo@v1 with: diff --git a/Cargo.toml b/Cargo.toml index 4cbec117..0d69780f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "moka" -version = "0.12.0-beta.1" +version = "0.12.0-beta.2" edition = "2018" # Rust 1.65 was released on Nov 3, 2022. rust-version = "1.65" @@ -16,11 +16,10 @@ exclude = [".circleci", ".devcontainer", ".github", ".gitpod.yml", ".vscode"] build = "build.rs" [features] -default = ["sync", "atomic64", "quanta"] +default = ["atomic64", "quanta"] -# This feature is enabled by default. Disable it when you do not need -# `moka::sync::{Cache, SegmentedCache}` -sync = ["scheduled-thread-pool"] +# Enable this feature to use `moka::sync::{Cache, SegmentedCache}` +sync = [] # Enable this feature to use `moka::future::Cache`.
future = ["async-lock", "async-trait", "futures-util"] @@ -64,9 +63,6 @@ triomphe = { version = "0.1.3", default-features = false } # Optional dependencies (enabled by default) quanta = { version = "0.11.0", optional = true } -# Optional dependencies (sync) -scheduled-thread-pool = { version = "0.2.7", optional = true } - # Optional dependencies (future) async-lock = { version = "2.4", optional = true } async-trait = { version = "0.1.58", optional = true } diff --git a/examples/eviction_listener.rs b/examples/eviction_listener.rs index f3c3cd45..84712c52 100644 --- a/examples/eviction_listener.rs +++ b/examples/eviction_listener.rs @@ -33,11 +33,9 @@ fn main() { // called. If you want to remove all entries immediately, call sync() method // repeatedly like the loop below. cache.invalidate_all(); - // This trait provides sync() method. - use moka::sync::ConcurrentCacheExt; loop { // Synchronization is limited to at most 500 entries for each call. - cache.sync(); + cache.run_pending_tasks(); // Check if all is done. Calling entry_count() requires calling sync() // first! if cache.entry_count() == 0 { diff --git a/src/cht.rs b/src/cht.rs index 073b410c..78a656f0 100644 --- a/src/cht.rs +++ b/src/cht.rs @@ -71,12 +71,10 @@ //! [Junction]: https://github.com/preshing/junction //! [a tech talk]: https://youtu.be/HJ-719EGIts +pub(crate) mod iter; pub(crate) mod map; pub(crate) mod segment; -#[cfg(feature = "future")] -pub(crate) mod iter; - #[cfg(test)] #[macro_use] pub(crate) mod test_util; diff --git a/src/cht/segment.rs b/src/cht/segment.rs index a6cbabed..5f6ae6ce 100644 --- a/src/cht/segment.rs +++ b/src/cht/segment.rs @@ -35,7 +35,6 @@ use crate::cht::map::{ DefaultHashBuilder, }; -#[cfg(feature = "future")] use super::iter::{Iter, ScanningGet}; use std::{ @@ -206,7 +205,6 @@ impl HashMap { /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. 
- #[cfg(any(test, feature = "future"))] pub(crate) fn len(&self) -> usize { self.len.load(Ordering::Relaxed) } @@ -217,7 +215,6 @@ impl HashMap { /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. - #[cfg(any(test, feature = "future"))] pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } @@ -252,7 +249,6 @@ impl HashMap { } impl HashMap { - #[cfg(feature = "future")] #[inline] pub(crate) fn contains_key(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> bool { self.get_key_value_and_then(hash, eq, |_, _| Some(())) @@ -299,7 +295,6 @@ impl HashMap { /// /// If the map did have this key present, both the key and value are /// updated. - #[cfg(any(test, feature = "future"))] #[inline] pub fn insert_entry_and( &self, @@ -494,7 +489,6 @@ impl HashMap { Some(bucket_array_ref.keys(with_key)) } - #[cfg(feature = "future")] pub(crate) fn iter(&self) -> Iter<'_, K, V> where K: Clone, @@ -513,7 +507,6 @@ impl HashMap { } } -#[cfg(feature = "future")] impl ScanningGet for HashMap where K: Hash + Eq + Clone, diff --git a/src/common.rs b/src/common.rs index 9cbecdaf..a7a3d0bd 100644 --- a/src/common.rs +++ b/src/common.rs @@ -64,7 +64,7 @@ pub(crate) fn sketch_capacity(max_capacity: u64) -> u32 { max_capacity.try_into().unwrap_or(u32::MAX).max(128) } -#[cfg(any(test, feature = "sync"))] +#[cfg(test)] pub(crate) fn available_parallelism() -> usize { use std::{num::NonZeroUsize, thread::available_parallelism}; available_parallelism().map(NonZeroUsize::get).unwrap_or(1) diff --git a/src/common/builder_utils.rs b/src/common/builder_utils.rs index 20bc1f72..4db23a61 100644 --- a/src/common/builder_utils.rs +++ b/src/common/builder_utils.rs @@ -1,8 +1,5 @@ use std::time::Duration; -#[cfg(feature = "sync")] -use super::concurrent::housekeeper; - const YEAR_SECONDS: u64 = 365 * 24 * 3600; pub(crate) fn ensure_expirations_or_panic( @@ -17,12 +14,3 @@ pub(crate) fn ensure_expirations_or_panic( assert!(d <= max_duration, "time_to_idle 
is longer than 1000 years"); } } - -#[cfg(feature = "sync")] -pub(crate) fn housekeeper_conf(thread_pool_enabled: bool) -> housekeeper::Configuration { - if thread_pool_enabled { - housekeeper::Configuration::new_thread_pool(true) - } else { - housekeeper::Configuration::new_blocking() - } -} diff --git a/src/common/concurrent.rs b/src/common/concurrent.rs index c5e5d97f..3943c0b5 100644 --- a/src/common/concurrent.rs +++ b/src/common/concurrent.rs @@ -12,12 +12,6 @@ pub(crate) mod entry_info; #[cfg(feature = "sync")] pub(crate) mod housekeeper; -#[cfg(feature = "sync")] -pub(crate) mod thread_pool; - -#[cfg(feature = "sync")] -pub(crate) mod unsafe_weak_pointer; - // target_has_atomic is more convenient but yet unstable (Rust 1.55) // https://github.com/rust-lang/rust/issues/32976 // #[cfg_attr(target_has_atomic = "64", path = "common/time_atomic64.rs")] diff --git a/src/common/concurrent/constants.rs b/src/common/concurrent/constants.rs index bb6dbc68..bf5ad1ba 100644 --- a/src/common/concurrent/constants.rs +++ b/src/common/concurrent/constants.rs @@ -7,14 +7,5 @@ pub(crate) const READ_LOG_SIZE: usize = READ_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS pub(crate) const WRITE_LOG_FLUSH_POINT: usize = 512; pub(crate) const WRITE_LOG_SIZE: usize = WRITE_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS + 2); -#[cfg(feature = "sync")] -pub(crate) const WRITE_LOG_LOW_WATER_MARK: usize = WRITE_LOG_FLUSH_POINT / 2; - #[cfg(feature = "sync")] pub(crate) const WRITE_RETRY_INTERVAL_MICROS: u64 = 50; - -#[cfg(feature = "sync")] -pub(crate) const PERIODICAL_SYNC_NORMAL_PACE_MILLIS: u64 = 300; - -#[cfg(feature = "sync")] -pub(crate) const PERIODICAL_SYNC_FAST_PACE_NANOS: u64 = 500; diff --git a/src/common/concurrent/housekeeper.rs b/src/common/concurrent/housekeeper.rs index d9daf87a..e833975e 100644 --- a/src/common/concurrent/housekeeper.rs +++ b/src/common/concurrent/housekeeper.rs @@ -1,135 +1,58 @@ -use super::constants::{ - MAX_SYNC_REPEATS, PERIODICAL_SYNC_FAST_PACE_NANOS, 
PERIODICAL_SYNC_INITIAL_DELAY_MILLIS, - PERIODICAL_SYNC_NORMAL_PACE_MILLIS, -}; -use super::{ - thread_pool::{ThreadPool, ThreadPoolRegistry}, - unsafe_weak_pointer::UnsafeWeakPointer, -}; +use super::constants::{MAX_SYNC_REPEATS, PERIODICAL_SYNC_INITIAL_DELAY_MILLIS}; use super::{ atomic_time::AtomicInstant, constants::{READ_LOG_FLUSH_POINT, WRITE_LOG_FLUSH_POINT}, }; use crate::common::time::{CheckedTimeOps, Instant}; -use parking_lot::Mutex; -use scheduled_thread_pool::JobHandle; + use std::{ - marker::PhantomData, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Weak, - }, + sync::atomic::{AtomicBool, Ordering}, time::Duration, }; pub(crate) trait InnerSync { - fn sync(&self, max_sync_repeats: usize) -> Option; + fn run_pending_tasks(&self, max_sync_repeats: usize); fn now(&self) -> Instant; } -#[derive(Clone, Debug)] -pub(crate) struct Configuration { - is_blocking: bool, - periodical_sync_enabled: bool, -} - -impl Configuration { - pub(crate) fn new_blocking() -> Self { - Self { - is_blocking: true, - periodical_sync_enabled: false, - } - } - - pub(crate) fn new_thread_pool(periodical_sync_enabled: bool) -> Self { - Self { - is_blocking: false, - periodical_sync_enabled, - } - } -} - -pub(crate) enum Housekeeper { - Blocking(BlockingHousekeeper), - ThreadPool(ThreadPoolHousekeeper), -} - -impl Housekeeper -where - T: InnerSync + 'static, -{ - pub(crate) fn new(inner: Weak, config: Configuration) -> Self { - if config.is_blocking { - Housekeeper::Blocking(BlockingHousekeeper::default()) - } else { - Housekeeper::ThreadPool(ThreadPoolHousekeeper::new( - inner, - config.periodical_sync_enabled, - )) - } - } - - pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { - match self { - Housekeeper::Blocking(h) => h.should_apply_reads(ch_len, now), - Housekeeper::ThreadPool(h) => h.should_apply_reads(ch_len, now), - } - } - - pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { - match self { - 
Housekeeper::Blocking(h) => h.should_apply_writes(ch_len, now), - Housekeeper::ThreadPool(h) => h.should_apply_writes(ch_len, now), - } - } - - pub(crate) fn try_sync(&self, cache: &impl InnerSync) -> bool { - match self { - Housekeeper::Blocking(h) => h.try_sync(cache), - Housekeeper::ThreadPool(h) => h.try_schedule_sync(), - } - } - - #[cfg(test)] - pub(crate) fn stop_periodical_sync_job(&self) { - match self { - Housekeeper::Blocking(_) => (), - Housekeeper::ThreadPool(h) => h.stop_periodical_sync_job(), - } - } -} - -pub(crate) struct BlockingHousekeeper { +pub(crate) struct Housekeeper { is_sync_running: AtomicBool, - sync_after: AtomicInstant, + run_after: AtomicInstant, + auto_run_enabled: AtomicBool, } -impl Default for BlockingHousekeeper { +impl Default for Housekeeper { fn default() -> Self { Self { is_sync_running: Default::default(), - sync_after: AtomicInstant::new(Self::sync_after(Instant::now())), + run_after: AtomicInstant::new(Self::sync_after(Instant::now())), + auto_run_enabled: AtomicBool::new(true), } } } -impl BlockingHousekeeper { - fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { +impl Housekeeper { + pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { self.should_apply(ch_len, READ_LOG_FLUSH_POINT / 8, now) } - fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { + pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { self.should_apply(ch_len, WRITE_LOG_FLUSH_POINT / 8, now) } #[inline] fn should_apply(&self, ch_len: usize, ch_flush_point: usize, now: Instant) -> bool { - ch_len >= ch_flush_point || now >= self.sync_after.instant().unwrap() + self.auto_run_enabled.load(Ordering::Relaxed) + && (ch_len >= ch_flush_point || now >= self.run_after.instant().unwrap()) } - fn try_sync(&self, cache: &T) -> bool { + pub(crate) fn run_pending_tasks(&self, cache: &T) { + self.do_run_pending_tasks(cache); + } + + pub(crate) fn try_run_pending_tasks(&self, cache: 
&T) -> bool { // Try to flip the value of sync_scheduled from false to true. match self.is_sync_running.compare_exchange( false, @@ -138,18 +61,22 @@ impl BlockingHousekeeper { Ordering::Acquire, ) { Ok(_) => { - let now = cache.now(); - self.sync_after.set_instant(Self::sync_after(now)); - - cache.sync(MAX_SYNC_REPEATS); - - self.is_sync_running.store(false, Ordering::Release); + self.do_run_pending_tasks(cache); true } Err(_) => false, } } + fn do_run_pending_tasks(&self, cache: &T) { + let now = cache.now(); + self.run_after.set_instant(Self::sync_after(now)); + + cache.run_pending_tasks(MAX_SYNC_REPEATS); + + self.is_sync_running.store(false, Ordering::Release); + } + fn sync_after(now: Instant) -> Instant { let dur = Duration::from_millis(PERIODICAL_SYNC_INITIAL_DELAY_MILLIS); let ts = now.checked_add(dur); @@ -159,185 +86,13 @@ impl BlockingHousekeeper { } } -#[derive(PartialEq, Eq)] -pub(crate) enum SyncPace { - Normal, - Fast, -} - -impl SyncPace { - fn make_duration(&self) -> Duration { - use SyncPace::*; - match self { - Normal => Duration::from_millis(PERIODICAL_SYNC_NORMAL_PACE_MILLIS), - Fast => Duration::from_nanos(PERIODICAL_SYNC_FAST_PACE_NANOS), - } - } -} - -pub(crate) struct ThreadPoolHousekeeper { - inner: Arc>>, - thread_pool: Arc, - is_shutting_down: Arc, - periodical_sync_job: Mutex>, - periodical_sync_running: Arc>, - on_demand_sync_scheduled: Arc, - _marker: PhantomData, -} - -impl Drop for ThreadPoolHousekeeper { - fn drop(&mut self) { - // Disallow to create and/or run sync jobs by now. - self.is_shutting_down.store(true, Ordering::Release); - - // Cancel the periodical sync job. (This will not abort the job if it is - // already running) - if let Some(j) = self.periodical_sync_job.lock().take() { - j.cancel() - } - - // Wait for the periodical sync job to finish. - // - // NOTE: As suggested by Clippy 1.59, drop the lock explicitly rather - // than doing non-binding let to `_`. 
- // https://rust-lang.github.io/rust-clippy/master/index.html#let_underscore_lock - std::mem::drop(self.periodical_sync_running.lock()); - - // Wait for the on-demand sync job to finish. (busy loop) - while self.on_demand_sync_scheduled.load(Ordering::Acquire) { - std::thread::sleep(Duration::from_millis(1)); - } - - // All sync jobs should have been finished by now. Clean other stuff up. - ThreadPoolRegistry::release_pool(&self.thread_pool); - std::mem::drop(unsafe { self.inner.lock().as_weak_arc() }); - } -} - -// functions/methods used by Cache -impl ThreadPoolHousekeeper -where - T: InnerSync + 'static, -{ - fn new(inner: Weak, periodical_sync_enable: bool) -> Self { - use super::thread_pool::PoolName; - - let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::Housekeeper); - let inner_ptr = Arc::new(Mutex::new(UnsafeWeakPointer::from_weak_arc(inner))); - let is_shutting_down = Arc::new(AtomicBool::new(false)); - let periodical_sync_running = Arc::new(Mutex::new(())); - - let maybe_sync_job = if periodical_sync_enable { - Some(Self::start_periodical_sync_job( - &thread_pool, - Arc::clone(&inner_ptr), - Arc::clone(&is_shutting_down), - Arc::clone(&periodical_sync_running), - )) - } else { - None - }; - - Self { - inner: inner_ptr, - thread_pool, - is_shutting_down, - periodical_sync_job: Mutex::new(maybe_sync_job), - periodical_sync_running, - on_demand_sync_scheduled: Arc::new(AtomicBool::new(false)), - _marker: PhantomData, - } - } - - fn start_periodical_sync_job( - thread_pool: &Arc, - unsafe_weak_ptr: Arc>>, - is_shutting_down: Arc, - periodical_sync_running: Arc>, - ) -> JobHandle { - let mut sync_pace = SyncPace::Normal; - - let housekeeper_closure = { - move || { - if !is_shutting_down.load(Ordering::Acquire) { - let _lock = periodical_sync_running.lock(); - if let Some(new_pace) = Self::call_sync(&unsafe_weak_ptr) { - if sync_pace != new_pace { - sync_pace = new_pace - } - } - } - - Some(sync_pace.make_duration()) - } - }; - - let initial_delay = 
Duration::from_millis(PERIODICAL_SYNC_INITIAL_DELAY_MILLIS); - - // Execute a task in a worker thread. - thread_pool - .pool - .execute_with_dynamic_delay(initial_delay, housekeeper_closure) +#[cfg(test)] +impl Housekeeper { + pub(crate) fn disable_auto_run(&self) { + self.auto_run_enabled.store(false, Ordering::Relaxed); } - fn should_apply_reads(&self, ch_len: usize, _now: Instant) -> bool { - ch_len >= READ_LOG_FLUSH_POINT - } - - fn should_apply_writes(&self, ch_len: usize, _now: Instant) -> bool { - ch_len >= WRITE_LOG_FLUSH_POINT - } - - fn try_schedule_sync(&self) -> bool { - // If shutting down, do not schedule the task. - if self.is_shutting_down.load(Ordering::Acquire) { - return false; - } - - // Try to flip the value of sync_scheduled from false to true. - match self.on_demand_sync_scheduled.compare_exchange( - false, - true, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => { - let unsafe_weak_ptr = Arc::clone(&self.inner); - let sync_scheduled = Arc::clone(&self.on_demand_sync_scheduled); - // Execute a task in a worker thread. - self.thread_pool.pool.execute(move || { - Self::call_sync(&unsafe_weak_ptr); - sync_scheduled.store(false, Ordering::Release); - }); - true - } - Err(_) => false, - } - } - - #[cfg(test)] - pub(crate) fn stop_periodical_sync_job(&self) { - if let Some(j) = self.periodical_sync_job.lock().take() { - j.cancel(); - } - } -} - -impl ThreadPoolHousekeeper { - fn call_sync(unsafe_weak_ptr: &Arc>>) -> Option { - let lock = unsafe_weak_ptr.lock(); - // Restore the Weak pointer to Inner. - let weak = unsafe { lock.as_weak_arc() }; - if let Some(inner) = weak.upgrade() { - // TODO: Protect this call with catch_unwind(). - let sync_pace = inner.sync(MAX_SYNC_REPEATS); - // Avoid to drop the Arc>. - UnsafeWeakPointer::forget_arc(inner); - sync_pace - } else { - // Avoid to drop the Weak>. 
- UnsafeWeakPointer::forget_weak_arc(weak); - None - } + pub(crate) fn reset_run_after(&self, now: Instant) { + self.run_after.set_instant(Self::sync_after(now)); } } diff --git a/src/common/concurrent/thread_pool.rs b/src/common/concurrent/thread_pool.rs deleted file mode 100644 index e14a424d..00000000 --- a/src/common/concurrent/thread_pool.rs +++ /dev/null @@ -1,100 +0,0 @@ -use once_cell::sync::Lazy; -use parking_lot::RwLock; -use scheduled_thread_pool::ScheduledThreadPool; -use std::{collections::HashMap, sync::Arc}; - -static REGISTRY: Lazy = Lazy::new(ThreadPoolRegistry::default); - -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub(crate) enum PoolName { - Housekeeper, - Invalidator, - RemovalNotifier, -} - -impl PoolName { - fn thread_name_template(&self) -> &'static str { - match self { - PoolName::Housekeeper => "moka-housekeeper-{}", - PoolName::Invalidator => "moka-invalidator-{}", - PoolName::RemovalNotifier => "moka-notifier-{}", - } - } -} - -pub(crate) struct ThreadPool { - pub(crate) name: PoolName, - pub(crate) pool: ScheduledThreadPool, - // pub(crate) num_threads: usize, -} - -impl ThreadPool { - fn new(name: PoolName, num_threads: usize) -> Self { - let pool = ScheduledThreadPool::builder() - .num_threads(num_threads) - .thread_name_pattern(name.thread_name_template()) - .build(); - Self { - name, - pool, - // num_threads, - } - } -} - -pub(crate) struct ThreadPoolRegistry { - pools: RwLock>>, -} - -impl Default for ThreadPoolRegistry { - fn default() -> Self { - Self { - pools: RwLock::new(HashMap::default()), - } - } -} - -impl ThreadPoolRegistry { - pub(crate) fn acquire_pool(name: PoolName) -> Arc { - loop { - { - // Acquire a read lock and get the pool. - let pools = REGISTRY.pools.read(); - if let Some(pool) = pools.get(&name) { - return Arc::clone(pool); - } - } - { - // Acquire the write lock, double check the pool still does not exist, - // and insert a new pool. 
- let mut pools = REGISTRY.pools.write(); - pools.entry(name).or_insert_with(|| { - let num_threads = crate::common::available_parallelism(); - let pool = ThreadPool::new(name, num_threads); - Arc::new(pool) - }); - } - } - } - - pub(crate) fn release_pool(pool: &Arc) { - if Arc::strong_count(pool) <= 2 { - // No other client exists; only this Arc and the registry are - // the owners. Let's remove and drop the one in the registry. - let name = pool.name; - let mut pools = REGISTRY.pools.write(); - if let Some(pool) = pools.get(&name) { - if Arc::strong_count(pool) <= 2 { - pools.remove(&name); - } - } - } - } - - #[cfg(all(test, feature = "sync"))] - pub(crate) fn enabled_pools() -> Vec { - let mut names: Vec<_> = REGISTRY.pools.read().keys().cloned().collect(); - names.sort_unstable(); - names - } -} diff --git a/src/common/concurrent/unsafe_weak_pointer.rs b/src/common/concurrent/unsafe_weak_pointer.rs deleted file mode 100644 index e8f5341d..00000000 --- a/src/common/concurrent/unsafe_weak_pointer.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::sync::{Arc, Weak}; - -/// WARNING: Do not use this struct unless you are absolutely sure what you are -/// doing. Using this struct is unsafe and may cause memory related crashes and/or -/// security vulnerabilities. -pub(crate) struct UnsafeWeakPointer { - // This is a std::sync::Weak pointer to Inner. - raw_ptr: *mut T, -} - -unsafe impl Send for UnsafeWeakPointer {} - -impl UnsafeWeakPointer { - pub(crate) fn from_weak_arc(p: Weak) -> Self { - Self { - raw_ptr: p.into_raw() as *mut T, - } - } - - pub(crate) unsafe fn as_weak_arc(&self) -> Weak { - Weak::from_raw(self.raw_ptr.cast()) - } - - pub(crate) fn forget_arc(p: Arc) { - // Downgrade the Arc to Weak, then forget. 
- let weak = Arc::downgrade(&p); - std::mem::forget(weak); - } - - pub(crate) fn forget_weak_arc(p: Weak) { - std::mem::forget(p); - } -} - -/// `clone()` simply creates a copy of the `raw_ptr`, effectively creating many -/// copies of the same `Weak` pointer. We are doing this for a good reason for our -/// use case. -/// -/// When you want to drop the Weak pointer, ensure that you drop it only once for the -/// same `raw_ptr` across clones. -impl Clone for UnsafeWeakPointer { - fn clone(&self) -> Self { - Self { - raw_ptr: self.raw_ptr, - } - } -} diff --git a/src/future/cache.rs b/src/future/cache.rs index fb23c310..f1117691 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -484,28 +484,22 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// } /// } /// -/// async fn write_data_file(&mut self, contents: String) -> io::Result { -/// loop { -/// // Generate a unique file path. -/// let mut path = self.base_dir.to_path_buf(); -/// path.push(Uuid::new_v4().as_hyphenated().to_string()); +/// async fn write_data_file( +/// &mut self, +/// key: impl AsRef, +/// contents: String +/// ) -> io::Result { +/// // Use the key as a part of the filename. +/// let mut path = self.base_dir.to_path_buf(); +/// path.push(key.as_ref()); /// -/// if path.exists() { -/// continue; // This path is already taken by others. Retry. -/// } -/// -/// // We have got a unique file path, so create the file at -/// // the path and write the contents to the file. -/// fs::write(&path, contents).await?; -/// self.file_count += 1; -/// println!( -/// "Created a data file at {:?} (file count: {})", -/// path, self.file_count -/// ); +/// assert!(!path.exists(), "Path already exists: {:?}", path); /// -/// // Return the path. -/// return Ok(path); -/// } +/// // create the file at the path and write the contents to the file. 
+/// fs::write(&path, contents).await?; +/// self.file_count += 1; +/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); +/// Ok(path) /// } /// /// async fn read_data_file(&self, path: impl AsRef) -> io::Result { @@ -531,7 +525,12 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// async fn main() -> anyhow::Result<()> { /// // Create an instance of the DataFileManager and wrap it with /// // Arc> so it can be shared across threads. -/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let mut base_dir = std::env::temp_dir(); +/// base_dir.push(Uuid::new_v4().as_hyphenated().to_string()); +/// println!("base_dir: {:?}", base_dir); +/// std::fs::create_dir(&base_dir)?; +/// +/// let file_mgr = DataFileManager::new(base_dir); /// let file_mgr = Arc::new(RwLock::new(file_mgr)); /// /// let file_mgr1 = Arc::clone(&file_mgr); @@ -572,11 +571,12 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// // This will create and write a data file for the key "user1", store the /// // path of the file to the cache, and return it. /// println!("== try_get_with()"); +/// let key = "user1"; /// let path = cache -/// .try_get_with("user1", async { +/// .try_get_with(key, async { /// let mut mgr = file_mgr.write().await; /// let path = mgr -/// .write_data_file("user data".into()) +/// .write_data_file(key, "user data".into()) /// .await /// .with_context(|| format!("Failed to create a data file"))?; /// Ok(path) as anyhow::Result<_> @@ -600,6 +600,8 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// // remove the file. /// tokio::time::sleep(Duration::from_secs(5)).await; /// +/// cache.run_pending_tasks(); +/// /// Ok(()) /// } /// ``` @@ -1599,6 +1601,7 @@ where Iter::new(inner) } + /// Performs any pending maintenance operations needed by the cache. 
pub async fn run_pending_tasks(&self) { if let Some(hk) = &self.base.housekeeper { self.base.retry_interrupted_ops().await; diff --git a/src/notification.rs b/src/notification.rs index cad55a7e..3817d0ae 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -17,10 +17,6 @@ pub type ListenerFuture = Pin + Send>>; pub(crate) type EvictionListener = Arc, V, RemovalCause) + Send + Sync + 'static>; -#[cfg(feature = "sync")] -pub(crate) type EvictionListenerRef<'a, K, V> = - &'a Arc, V, RemovalCause) + Send + Sync + 'static>; - #[cfg(feature = "future")] pub(crate) type AsyncEvictionListener = Box, V, RemovalCause) -> ListenerFuture + Send + Sync + 'static>; @@ -30,90 +26,6 @@ pub(crate) type AsyncEvictionListener = // the notifications, but currently there is no way to know when all entries // have been invalidated and their notifications have been sent. -/// Configuration for an eviction listener of a cache. -/// -/// Currently only setting the [`DeliveryMode`][delivery-mode] is supported. -/// -/// [delivery-mode]: ./enum.DeliveryMode.html -#[cfg(feature = "sync")] -#[derive(Clone, Debug, Default)] -pub struct Configuration { - mode: DeliveryMode, -} - -#[cfg(feature = "sync")] -impl Configuration { - pub fn builder() -> ConfigurationBuilder { - ConfigurationBuilder::default() - } - - pub fn delivery_mode(&self) -> DeliveryMode { - self.mode - } -} - -/// Builds a [`Configuration`][conf] with some configuration knobs. -/// -/// Currently only setting the [`DeliveryMode`][delivery-mode] is supported. 
-/// -/// [conf]: ./struct.Configuration.html -/// [delivery-mode]: ./enum.DeliveryMode.html -#[cfg(feature = "sync")] -#[derive(Default)] -pub struct ConfigurationBuilder { - mode: DeliveryMode, -} - -#[cfg(feature = "sync")] -impl ConfigurationBuilder { - pub fn build(self) -> Configuration { - Configuration { mode: self.mode } - } - - pub fn delivery_mode(self, mode: DeliveryMode) -> Self { - Self { mode } - } -} - -/// Specifies how and when an eviction notification should be delivered to an -/// eviction listener. -/// -/// For more details, see [the document][delivery-mode-doc] of `sync::Cache`. -/// -/// [delivery-mode-doc]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener -#[cfg(feature = "sync")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum DeliveryMode { - /// With this mode, a notification should be delivered to the listener - /// immediately after an entry was evicted. It also guarantees that eviction - /// notifications and cache write operations such and `insert`, `get_with` and - /// `invalidate` for a given cache key are ordered by the time when they - /// occurred. - /// - /// To guarantee the order, cache maintains key-level lock, which will reduce - /// concurrent write performance. - /// - /// Use this mode when the order is more import than the write performance. - Immediate, - /// With this mode, a notification will be delivered to the listener some time - /// after an entry was evicted. Therefore, it does not preserve the order of - /// eviction notifications and write operations. - /// - /// On the other hand, cache does not maintain key-level lock, so there will be - /// no overhead on write performance. - /// - /// Use this mode when write performance is more important than preserving the - /// order of eviction notifications and write operations. 
- Queued, -} - -#[cfg(feature = "sync")] -impl Default for DeliveryMode { - fn default() -> Self { - Self::Immediate - } -} - /// Indicates the reason why a cached entry was removed. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RemovalCause { @@ -133,25 +45,3 @@ impl RemovalCause { matches!(self, Self::Expired | Self::Size) } } - -#[cfg(all(test, feature = "sync"))] -pub(crate) mod macros { - - macro_rules! assert_with_mode { - ($cond:expr, $delivery_mode:ident) => { - assert!( - $cond, - "assertion failed. (delivery mode: {:?})", - $delivery_mode - ) - }; - } - - macro_rules! assert_eq_with_mode { - ($left:expr, $right:expr, $delivery_mode:ident) => { - assert_eq!($left, $right, "(delivery mode: {:?})", $delivery_mode) - }; - } - - pub(crate) use {assert_eq_with_mode, assert_with_mode}; -} diff --git a/src/notification/notifier.rs b/src/notification/notifier.rs index 4c37f609..cb7eaeda 100644 --- a/src/notification/notifier.rs +++ b/src/notification/notifier.rs @@ -1,100 +1,19 @@ -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, }; -use crate::{ - common::concurrent::{ - constants::WRITE_RETRY_INTERVAL_MICROS, - thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, - }, - notification::{self, DeliveryMode, EvictionListener, EvictionListenerRef, RemovalCause}, -}; - -use crossbeam_channel::{Receiver, Sender, TrySendError}; -use parking_lot::Mutex; - -const CHANNEL_CAPACITY: usize = 1_024; -const SUBMIT_TASK_THRESHOLD: usize = 100; -const MAX_NOTIFICATIONS_PER_TASK: u16 = 5_000; - -pub(crate) enum RemovalNotifier { - Blocking(BlockingRemovalNotifier), - ThreadPool(ThreadPoolRemovalNotifier), -} - -impl RemovalNotifier { - pub(crate) fn new( - listener: EvictionListener, - conf: notification::Configuration, - cache_name: Option, - ) -> Self { - match conf.delivery_mode() { - DeliveryMode::Immediate => { - Self::Blocking(BlockingRemovalNotifier::new(listener, 
cache_name)) - } - DeliveryMode::Queued => { - Self::ThreadPool(ThreadPoolRemovalNotifier::new(listener, cache_name)) - } - } - } - - pub(crate) fn is_blocking(&self) -> bool { - matches!(self, RemovalNotifier::Blocking(_)) - } - - pub(crate) fn is_batching_supported(&self) -> bool { - matches!(self, RemovalNotifier::ThreadPool(_)) - } - - pub(crate) fn notify(&self, key: Arc, value: V, cause: RemovalCause) - where - K: Send + Sync + 'static, - V: Send + Sync + 'static, - { - match self { - RemovalNotifier::Blocking(notifier) => notifier.notify(key, value, cause), - RemovalNotifier::ThreadPool(notifier) => { - notifier.add_single_notification(key, value, cause) - } - } - } - - pub(crate) fn batch_notify(&self, entries: Vec>) - where - K: Send + Sync + 'static, - V: Send + Sync + 'static, - { - match self { - RemovalNotifier::Blocking(_) => unreachable!(), - RemovalNotifier::ThreadPool(notifier) => notifier.add_multiple_notifications(entries), - } - } - - pub(crate) fn sync(&self) - where - K: Send + Sync + 'static, - V: Send + Sync + 'static, - { - match self { - RemovalNotifier::Blocking(_) => unreachable!(), - RemovalNotifier::ThreadPool(notifier) => notifier.submit_task(), - } - } -} +use crate::notification::{EvictionListener, RemovalCause}; -pub(crate) struct BlockingRemovalNotifier { +pub(crate) struct RemovalNotifier { listener: EvictionListener, is_enabled: AtomicBool, #[cfg(feature = "logging")] cache_name: Option, } -impl BlockingRemovalNotifier { - fn new(listener: EvictionListener, _cache_name: Option) -> Self { +impl RemovalNotifier { + pub(crate) fn new(listener: EvictionListener, _cache_name: Option) -> Self { Self { listener, is_enabled: AtomicBool::new(true), @@ -103,7 +22,7 @@ impl BlockingRemovalNotifier { } } - fn notify(&self, key: Arc, value: V, cause: RemovalCause) { + pub(crate) fn notify(&self, key: Arc, value: V, cause: RemovalCause) { use std::panic::{catch_unwind, AssertUnwindSafe}; if !self.is_enabled.load(Ordering::Acquire) { @@ 
-123,259 +42,6 @@ impl BlockingRemovalNotifier { } } -pub(crate) struct ThreadPoolRemovalNotifier { - snd: Sender>, - state: Arc>, - thread_pool: Arc, -} - -impl Drop for ThreadPoolRemovalNotifier { - fn drop(&mut self) { - let state = &self.state; - // Disallow to create and run a notification task by now. - state.shutdown(); - - // Wait for the notification task to finish. (busy loop) - while state.is_running() { - std::thread::sleep(Duration::from_millis(1)); - } - - ThreadPoolRegistry::release_pool(&self.thread_pool); - } -} - -impl ThreadPoolRemovalNotifier { - fn new(listener: EvictionListener, _cache_name: Option) -> Self { - let (snd, rcv) = crossbeam_channel::bounded(CHANNEL_CAPACITY); - let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::RemovalNotifier); - let state = NotifierState { - task_lock: Default::default(), - rcv, - listener, - #[cfg(feature = "logging")] - cache_name: _cache_name, - is_enabled: AtomicBool::new(true), - is_running: Default::default(), - is_shutting_down: Default::default(), - }; - Self { - snd, - state: Arc::new(state), - thread_pool, - } - } -} - -impl ThreadPoolRemovalNotifier -where - K: Send + Sync + 'static, - V: Send + Sync + 'static, -{ - fn add_single_notification(&self, key: Arc, value: V, cause: RemovalCause) { - let entry = RemovedEntries::new_single(key, value, cause); - self.send_entries(entry) - .expect("Failed to send notification"); - } - - fn add_multiple_notifications(&self, entries: Vec>) { - let entries = RemovedEntries::new_multi(entries); - self.send_entries(entries) - .expect("Failed to send notification"); - } - - fn send_entries( - &self, - entries: RemovedEntries, - ) -> Result<(), TrySendError>> { - let mut entries = entries; - loop { - self.submit_task_if_necessary(); - match self.snd.try_send(entries) { - Ok(()) => break, - Err(TrySendError::Full(entries1)) => { - entries = entries1; - std::thread::sleep(Duration::from_millis(WRITE_RETRY_INTERVAL_MICROS)); - } - Err(e @ 
TrySendError::Disconnected(_)) => return Err(e), - } - } - Ok(()) - } - - fn submit_task(&self) { - // TODO: Use compare and exchange to ensure it was false. - - let state = &self.state; - - if state.is_running() || !state.is_enabled() || state.is_shutting_down() { - return; - } - state.set_running(true); - - let task = NotificationTask::new(state); - self.thread_pool.pool.execute(move || { - task.execute(); - }); - } - - fn submit_task_if_necessary(&self) { - if self.snd.len() >= SUBMIT_TASK_THRESHOLD && !self.state.is_running() { - self.submit_task(); // TODO: Error handling? - } - } -} - -struct NotificationTask { - state: Arc>, -} - -impl NotificationTask { - fn new(state: &Arc>) -> Self { - Self { - state: Arc::clone(state), - } - } - - fn execute(&self) { - // Only one task can be executed at a time for a cache segment. - let task_lock = self.state.task_lock.lock(); - let mut count = 0u16; - let mut is_enabled = self.state.is_enabled(); - - if !is_enabled { - return; - } - - while let Ok(entries) = self.state.rcv.try_recv() { - match entries { - RemovedEntries::Single(entry) => { - let result = self.notify(&self.state.listener, entry); - if result.is_err() { - is_enabled = false; - break; - } - count += 1; - } - RemovedEntries::Multi(entries) => { - for entry in entries { - let result = self.notify(&self.state.listener, entry); - if result.is_err() { - is_enabled = false; - break; - } - if self.state.is_shutting_down() { - break; - } - count += 1; - } - } - } - - if count > MAX_NOTIFICATIONS_PER_TASK || self.state.is_shutting_down() { - break; - } - } - - if !is_enabled { - self.state.set_enabled(false); - } - - std::mem::drop(task_lock); - self.state.set_running(false); - } - - /// Returns `Ok(())` when calling the listener succeeded. Returns - /// `Err(panic_payload)` when the listener panicked. 
- fn notify( - &self, - listener: EvictionListenerRef<'_, K, V>, - entry: RemovedEntry, - ) -> Result<(), Box> { - use std::panic::{catch_unwind, AssertUnwindSafe}; - - let RemovedEntry { key, value, cause } = entry; - let listener_clo = || (listener)(key, value, cause); - - // Safety: It is safe to assert unwind safety here because we will not - // call the listener again if it has been panicked. - // - #[allow(clippy::let_and_return)] - // https://rust-lang.github.io/rust-clippy/master/index.html#let_and_return - let result = catch_unwind(AssertUnwindSafe(listener_clo)); - #[cfg(feature = "logging")] - { - if let Err(payload) = &result { - log_panic(&**payload, self.state.cache_name.as_deref()); - } - } - result - } -} - -struct NotifierState { - task_lock: Mutex<()>, - rcv: Receiver>, - listener: EvictionListener, - #[cfg(feature = "logging")] - cache_name: Option, - is_enabled: AtomicBool, - is_running: AtomicBool, - is_shutting_down: AtomicBool, -} - -impl NotifierState { - fn is_enabled(&self) -> bool { - self.is_enabled.load(Ordering::Acquire) - } - - fn set_enabled(&self, value: bool) { - self.is_enabled.store(value, Ordering::Release); - } - - fn is_running(&self) -> bool { - self.is_running.load(Ordering::Acquire) - } - - fn set_running(&self, value: bool) { - self.is_running.store(value, Ordering::Release); - } - - fn is_shutting_down(&self) -> bool { - self.is_shutting_down.load(Ordering::Acquire) - } - - fn shutdown(&self) { - self.is_shutting_down.store(true, Ordering::Release); - } -} - -pub(crate) struct RemovedEntry { - key: Arc, - value: V, - cause: RemovalCause, -} - -impl RemovedEntry { - pub(crate) fn new(key: Arc, value: V, cause: RemovalCause) -> Self { - Self { key, value, cause } - } -} - -enum RemovedEntries { - Single(RemovedEntry), - Multi(Vec>), -} - -impl RemovedEntries { - fn new_single(key: Arc, value: V, cause: RemovalCause) -> Self { - Self::Single(RemovedEntry::new(key, value, cause)) - } - - fn new_multi(entries: Vec>) -> Self { 
- Self::Multi(entries) - } -} - #[cfg(feature = "logging")] fn log_panic(payload: &(dyn std::any::Any + Send + 'static), cache_name: Option<&str>) { // Try to downcast the payload into &str or String. diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 0e95b1c3..326176e1 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -1,7 +1,7 @@ use super::{Cache, SegmentedCache}; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{self, EvictionListener, RemovalCause}, + notification::{EvictionListener, RemovalCause}, policy::ExpirationPolicy, Expiry, }; @@ -54,10 +54,8 @@ pub struct CacheBuilder { num_segments: Option, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - thread_pool_enabled: bool, cache_type: PhantomData, } @@ -74,11 +72,8 @@ where num_segments: None, weigher: None, eviction_listener: None, - eviction_listener_conf: None, expiration_policy: Default::default(), invalidator_enabled: false, - // TODO: Change this to `false` in Moka v0.12.0 or v0.13.0. 
- thread_pool_enabled: true, cache_type: Default::default(), } } @@ -116,10 +111,8 @@ where num_segments: Some(num_segments), weigher: self.weigher, eviction_listener: self.eviction_listener, - eviction_listener_conf: self.eviction_listener_conf, expiration_policy: self.expiration_policy, invalidator_enabled: self.invalidator_enabled, - thread_pool_enabled: self.thread_pool_enabled, cache_type: PhantomData, } } @@ -145,10 +138,8 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(self.thread_pool_enabled), ) } @@ -233,10 +224,8 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(self.thread_pool_enabled), ) } } @@ -268,10 +257,8 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(self.thread_pool_enabled), ) } @@ -358,10 +345,8 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(true), ) } } @@ -424,35 +409,6 @@ impl CacheBuilder { ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), - eviction_listener_conf: Some(Default::default()), - ..self - } - } - - /// Sets the eviction listener closure to the cache with a custom - /// [`Configuration`][conf]. Use this method if you want to change the delivery - /// mode to the queued mode. - /// - /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as - /// the arguments. - /// - /// # Panics - /// - /// It is very important to make the listener closure not to panic. Otherwise, - /// the cache will stop calling the listener after a panic. 
This is an intended - /// behavior because the cache cannot know whether is is memory safe or not to - /// call the panicked lister again. - /// - /// [removal-cause]: ../notification/enum.RemovalCause.html - /// [conf]: ../notification/struct.Configuration.html - pub fn eviction_listener_with_conf( - self, - listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, - conf: notification::Configuration, - ) -> Self { - Self { - eviction_listener: Some(Arc::new(listener)), - eviction_listener_conf: Some(conf), ..self } } @@ -515,22 +471,6 @@ impl CacheBuilder { ..self } } - - /// Specify whether or not to enable the thread pool for housekeeping tasks. - /// These tasks include removing expired entries and updating the LRU queue and - /// LFU filter. `true` to enable and `false` to disable. (Default: `true`) - /// - /// If disabled, the housekeeping tasks will be executed by a client thread when - /// necessary. - /// - /// NOTE: The default value will be changed to `false` in a future release - /// (v0.12.0 or v0.13.0). 
- pub fn thread_pool_enabled(self, v: bool) -> Self { - Self { - thread_pool_enabled: v, - ..self - } - } } #[cfg(test)] @@ -570,8 +510,6 @@ mod tests { #[test] fn build_segmented_cache() { - use crate::notification; - // SegmentCache let cache = CacheBuilder::new(100).segments(15).build(); let policy = cache.policy(); @@ -584,16 +522,12 @@ mod tests { cache.insert('b', "Bob"); assert_eq!(cache.get(&'b'), Some("Bob")); - let notification_conf = notification::Configuration::builder() - .delivery_mode(notification::DeliveryMode::Queued) - .build(); - let listener = move |_key, _value, _cause| (); let builder = CacheBuilder::new(400) .time_to_live(Duration::from_secs(45 * 60)) .time_to_idle(Duration::from_secs(15 * 60)) - .eviction_listener_with_conf(listener, notification_conf) + .eviction_listener(listener) .name("tracked_sessions") // Call segments() at the end to check all field values in the current // builder struct are copied to the new builder: @@ -601,7 +535,6 @@ mod tests { .segments(24); assert!(builder.eviction_listener.is_some()); - assert!(builder.eviction_listener_conf.is_some()); let cache = builder.build(); let policy = cache.policy(); diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 2a5e6da6..315db335 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1,17 +1,15 @@ use super::{ value_initializer::{InitResult, ValueInitializer}, - CacheBuilder, ConcurrentCacheExt, OwnedKeyEntrySelector, RefKeyEntrySelector, + CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector, }; use crate::{ common::{ concurrent::{ - constants::{MAX_SYNC_REPEATS, WRITE_RETRY_INTERVAL_MICROS}, - housekeeper::{self, InnerSync}, - Weigher, WriteOp, + constants::WRITE_RETRY_INTERVAL_MICROS, housekeeper::InnerSync, Weigher, WriteOp, }, time::Instant, }, - notification::{self, EvictionListener}, + notification::EvictionListener, policy::ExpirationPolicy, sync::{Iter, PredicateId}, sync_base::{ @@ -57,8 +55,6 @@ use std::{ /// 
panic](#you-should-avoid-eviction-listener-to-panic) /// - [Delivery modes for eviction listener](#delivery-modes-for-eviction-listener) /// - [`Immediate` mode](#immediate-mode) -/// - [`Queued` mode](#queued-mode) -/// - [Example: `Queued` Delivery Mode](#example-queued-delivery-mode) /// /// # Example: `insert`, `get` and `invalidate` /// @@ -406,6 +402,7 @@ use std::{ /// // /// // [dependencies] /// // anyhow = "1.0" +/// // uuid = { version = "1.1", features = ["v4"] } /// /// use moka::{sync::Cache, notification}; /// @@ -416,6 +413,7 @@ use std::{ /// sync::{Arc, RwLock}, /// time::Duration, /// }; +/// use uuid::Uuid; /// /// /// The DataFileManager writes, reads and removes data files. /// struct DataFileManager { @@ -471,13 +469,18 @@ use std::{ /// fn main() -> anyhow::Result<()> { /// // Create an instance of the DataFileManager and wrap it with /// // Arc> so it can be shared across threads. -/// let file_mgr = DataFileManager::new(std::env::temp_dir()); +/// let mut base_dir = std::env::temp_dir(); +/// base_dir.push(Uuid::new_v4().as_hyphenated().to_string()); +/// println!("base_dir: {:?}", base_dir); +/// std::fs::create_dir(&base_dir)?; +/// +/// let file_mgr = DataFileManager::new(base_dir); /// let file_mgr = Arc::new(RwLock::new(file_mgr)); /// /// let file_mgr1 = Arc::clone(&file_mgr); /// /// // Create an eviction lister closure. -/// let listener = move |k, v: PathBuf, cause| { +/// let eviction_listener = move |k, v: PathBuf, cause| { /// // Try to remove the data file at the path `v`. /// println!( /// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", @@ -504,7 +507,7 @@ use std::{ /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener(listener) +/// .eviction_listener(eviction_listener) /// .build(); /// /// // Insert an entry to the cache. @@ -541,6 +544,8 @@ use std::{ /// // remove the file. 
/// std::thread::sleep(Duration::from_secs(5)); /// +/// cache.run_pending_tasks(); +/// /// Ok(()) /// } /// ``` @@ -610,199 +615,6 @@ use std::{ /// - This mode adds almost no performance overhead to cache write operations as it /// does not use the per-key lock. /// -/// ### Example: `Queued` Delivery Mode -/// -/// Because the `Immediate` mode is the default mode for `sync` caches, the previous -/// example was using it implicitly. -/// -/// The following is the same example but modified for the `Queued` delivery mode. -/// (Showing changed lines only) -/// -/// ```rust -/// // Cargo.toml -/// // -/// // [dependencies] -/// // anyhow = "1.0" -/// // uuid = { version = "1.1", features = ["v4"] } -/// -/// use moka::{sync::Cache, notification}; -/// -/// # use anyhow::{anyhow, Context}; -/// # use std::{ -/// # fs, io, -/// # path::{Path, PathBuf}, -/// # sync::{Arc, RwLock}, -/// # time::Duration, -/// # }; -/// // Use UUID crate to generate a random file name. -/// use uuid::Uuid; -/// -/// # struct DataFileManager { -/// # base_dir: PathBuf, -/// # file_count: usize, -/// # } -/// # -/// impl DataFileManager { -/// # fn new(base_dir: PathBuf) -> Self { -/// # Self { -/// # base_dir, -/// # file_count: 0, -/// # } -/// # } -/// # -/// fn write_data_file( -/// &mut self, -/// _key: impl AsRef, -/// contents: String -/// ) -> io::Result { -/// // We do not use the key for the filename anymore. Instead, we -/// // use UUID to generate a unique filename for each call. -/// loop { -/// // Generate a file path with unique file name. -/// let mut path = self.base_dir.to_path_buf(); -/// path.push(Uuid::new_v4().as_hyphenated().to_string()); -/// -/// if path.exists() { -/// continue; // This path is already taken by others. Retry. -/// } -/// -/// // We have got a unique file path, so create the file at -/// // the path and write the contents to the file. 
-/// fs::write(&path, contents)?; -/// self.file_count += 1; -/// println!("Created a data file at {:?} (file count: {})", path, self.file_count); -/// -/// // Return the path. -/// return Ok(path); -/// } -/// } -/// -/// // Other associate functions and methods are unchanged. -/// # -/// # fn read_data_file(&self, path: impl AsRef) -> io::Result { -/// # fs::read_to_string(path) -/// # } -/// # -/// # fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { -/// # fs::remove_file(path.as_ref())?; -/// # self.file_count -= 1; -/// # println!( -/// # "Removed a data file at {:?} (file count: {})", -/// # path.as_ref(), -/// # self.file_count -/// # ); -/// # -/// # Ok(()) -/// # } -/// } -/// -/// fn main() -> anyhow::Result<()> { -/// // (Omitted unchanged lines) -/// -/// # let file_mgr = DataFileManager::new(std::env::temp_dir()); -/// # let file_mgr = Arc::new(RwLock::new(file_mgr)); -/// # -/// # let file_mgr1 = Arc::clone(&file_mgr); -/// # -/// // Create an eviction lister closure. -/// // let listener = ... -/// -/// # let listener = move |k, v: PathBuf, cause| { -/// # println!( -/// # "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", -/// # k, v, cause -/// # ); -/// # -/// # match file_mgr1.write() { -/// # Err(_e) => { -/// # eprintln!("The lock has been poisoned"); -/// # } -/// # Ok(mut mgr) => { -/// # if let Err(_e) = mgr.remove_data_file(v.as_path()) { -/// # eprintln!("Failed to remove a data file at {:?}", v); -/// # } -/// # } -/// # } -/// # }; -/// # -/// // Create a listener configuration with Queued delivery mode. -/// let listener_conf = notification::Configuration::builder() -/// .delivery_mode(notification::DeliveryMode::Queued) -/// .build(); -/// -/// // Create the cache. -/// let cache = Cache::builder() -/// .max_capacity(100) -/// .time_to_live(Duration::from_secs(2)) -/// // Set the eviction listener with the configuration. 
-/// .eviction_listener_with_conf(listener, listener_conf) -/// .build(); -/// -/// // Insert an entry to the cache. -/// // ... -/// # println!("== try_get_with()"); -/// # let key = "user1"; -/// # let path = cache -/// # .try_get_with(key, || -> anyhow::Result<_> { -/// # let mut mgr = file_mgr -/// # .write() -/// # .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; -/// # let path = mgr -/// # .write_data_file(key, "user data".into()) -/// # .with_context(|| format!("Failed to create a data file"))?; -/// # Ok(path) -/// # }) -/// # .map_err(|e| anyhow!("{}", e))?; -/// # -/// // Read the data file at the path and print the contents. -/// // ... -/// # println!("\n== read_data_file()"); -/// # { -/// # let mgr = file_mgr -/// # .read() -/// # .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?; -/// # let contents = mgr -/// # .read_data_file(path.as_path()) -/// # .with_context(|| format!("Failed to read data from {:?}", path))?; -/// # println!("contents: {}", contents); -/// # } -/// # -/// // Sleep for five seconds. -/// // ... -/// # std::thread::sleep(Duration::from_secs(5)); -/// -/// Ok(()) -/// } -/// ``` -/// -/// As you can see, `DataFileManager::write_data_file` method no longer uses the -/// cache key for the file name. Instead, it generates a UUID-based unique file name -/// on each call. This kind of treatment will be needed for `Queued` mode because -/// notifications will be delivered with some delay. -/// -/// For example, a user thread could do the followings: -/// -/// 1. `insert` an entry, and create a file. -/// 2. The entry is evicted due to size constraint: -/// - This will trigger an eviction notification but it will be fired some time -/// later. -/// - The notification listener will remove the file when it is called, but we -/// cannot predict when the call would be made. -/// 3. `insert` the entry again, and create the file again. 
-/// -/// In `Queued` mode, the notification of the eviction at step 2 can be delivered -/// either before or after the re-`insert` at step 3. If the `write_data_file` method -/// does not generate unique file name on each call and the notification has not been -/// delivered before step 3, the user thread could overwrite the file created at step -/// 1. And then the notification will be delivered and the eviction listener will -/// remove a wrong file created at step 3 (instead of the correct one created at step -/// 1). This will cause the cache entires and the files on the filesystem to become -/// out of sync. -/// -/// Generating unique file names prevents this problem, as the user thread will never -/// overwrite the file created at step 1 and the eviction lister will never remove a -/// wrong file. -/// pub struct Cache { base: BaseCache, value_initializer: Arc>, @@ -896,11 +708,9 @@ impl Cache { /// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// - /// // To mitigate the inaccuracy, bring `ConcurrentCacheExt` trait to - /// // the scope so we can use `sync` method. - /// use moka::sync::ConcurrentCacheExt; - /// // Call `sync` to run pending internal tasks. - /// cache.sync(); + /// // To mitigate the inaccuracy, Call `run_pending_tasks` method to run + /// // pending internal tasks. + /// cache.run_pending_tasks(); /// /// // Followings will print the actual numbers. 
/// println!("{}", cache.entry_count()); // -> 3 @@ -935,7 +745,6 @@ where /// [builder-struct]: ./struct.CacheBuilder.html pub fn new(max_capacity: u64) -> Self { let build_hasher = RandomState::default(); - let housekeeper_conf = housekeeper::Configuration::new_thread_pool(true); Self::with_everything( None, Some(max_capacity), @@ -943,10 +752,8 @@ where build_hasher, None, None, - None, Default::default(), false, - housekeeper_conf, ) } @@ -974,10 +781,8 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - housekeeper_conf: housekeeper::Configuration, ) -> Self { Self { base: BaseCache::new( @@ -987,10 +792,8 @@ where build_hasher.clone(), weigher, eviction_listener, - eviction_listener_conf, expiration_policy, invalidator_enabled, - housekeeper_conf, ), value_initializer: Arc::new(ValueInitializer::with_hasher(build_hasher)), } @@ -1785,7 +1588,7 @@ where // Lock the key for removal if blocking removal notification is enabled. let mut kl = None; let mut klg = None; - if self.base.is_removal_notifier_enabled() && self.base.is_blocking_removal_notification() { + if self.base.is_removal_notifier_enabled() { // To lock the key, we have to get Arc for key (&Q). // // TODO: Enhance this if possible. This is rather hack now because @@ -1945,6 +1748,13 @@ where pub fn iter(&self) -> Iter<'_, K, V> { Iter::with_single_cache_segment(&self.base, self.num_cht_segments()) } + + /// Performs any pending maintenance operations needed by the cache. 
+ pub fn run_pending_tasks(&self) { + if let Some(hk) = &self.base.housekeeper { + hk.run_pending_tasks(&*self.base.inner); + } + } } impl<'a, K, V, S> IntoIterator for &'a Cache @@ -1962,17 +1772,6 @@ where } } -impl ConcurrentCacheExt for Cache -where - K: Hash + Eq + Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - S: BuildHasher + Clone + Send + Sync + 'static, -{ - fn sync(&self) { - self.base.inner.sync(MAX_SYNC_REPEATS); - } -} - // // Iterator support // @@ -2010,7 +1809,7 @@ where ch: &Sender>, op: WriteOp, now: Instant, - housekeeper: Option<&HouseKeeperArc>, + housekeeper: Option<&HouseKeeperArc>, ) -> Result<(), TrySendError>> { let mut op = op; @@ -2019,7 +1818,7 @@ where // - We are doing a busy-loop here. We were originally calling `ch.send(op)?`, // but we got a notable performance degradation. loop { - BaseCache::apply_reads_writes_if_needed(inner, ch, now, housekeeper); + BaseCache::::apply_reads_writes_if_needed(inner, ch, now, housekeeper); match ch.try_send(op) { Ok(()) => break, Err(TrySendError::Full(op1)) => { @@ -2065,15 +1864,9 @@ where // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { - use super::{Cache, ConcurrentCacheExt}; + use super::Cache; use crate::{ - common::time::Clock, - notification::{ - self, - macros::{assert_eq_with_mode, assert_with_mode}, - DeliveryMode, RemovalCause, - }, - policy::test_utils::ExpiryCallCounters, + common::time::Clock, notification::RemovalCause, policy::test_utils::ExpiryCallCounters, Expiry, }; @@ -2096,7 +1889,7 @@ mod tests { assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); - cache.sync(); + cache.run_pending_tasks(); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); assert_eq!(cache.entry_count(), 0) @@ -2104,227 +1897,211 @@ mod tests { #[test] fn basic_single_thread() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + // The following `Vec`s will hold actual and expected 
notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(3) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - cache.sync(); - // counts: a -> 1, b -> 1 - - cache.insert("c", "cindy"); - assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); - // counts: a -> 2, b -> 2, c -> 1 - - // "d" should not be admitted because its frequency is too low. 
- cache.insert("d", "david"); // count: d -> 0 - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", "david"); - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 - - // "d" should be admitted and "c" should be evicted - // because d's frequency is higher than c's. - cache.insert("d", "dennis"); - expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some("dennis"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - cache.invalidate(&"b"); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - - assert!(cache.remove(&"b").is_none()); - assert_eq!(cache.remove(&"d"), Some("dennis")); - expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); - assert!(!cache.contains_key(&"d")); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - assert_with_mode!(cache.key_locks_map_is_empty(), delivery_mode); - } + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + assert_eq!(cache.get(&"a"), Some("alice")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b"), Some("bob")); + cache.run_pending_tasks(); + // counts: a -> 1, b -> 1 + + cache.insert("c", "cindy"); + assert_eq!(cache.get(&"c"), Some("cindy")); + assert!(cache.contains_key(&"c")); + // counts: a -> 1, b -> 1, c -> 1 + cache.run_pending_tasks(); + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); + // counts: a -> 2, b -> 2, c -> 1 + + // "d" should not be admitted because its frequency is too low. + cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 1 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 2 + + // "d" should be admitted and "c" should be evicted + // because d's frequency is higher than c's. 
+ cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"c"), None); + assert_eq!(cache.get(&"d"), Some("dennis")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"b")); + + assert!(cache.remove(&"b").is_none()); + assert_eq!(cache.remove(&"d"), Some("dennis")); + expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); + assert!(!cache.contains_key(&"d")); + + verify_notification_vec(&cache, actual, &expected); + assert!(cache.key_locks_map_is_empty()); } #[test] fn size_aware_eviction() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + let weigher = |_k: &&str, v: &(&str, u32)| v.1; - fn run_test(delivery_mode: DeliveryMode) { - let weigher = |_k: &&str, v: &(&str, u32)| v.1; + let alice = ("alice", 10); + let bob = ("bob", 15); + let bill = ("bill", 20); + let cindy = ("cindy", 5); + let david = ("david", 15); + let dennis = ("dennis", 15); - let alice = ("alice", 10); - let bob = ("bob", 15); - let bill = ("bill", 20); - let cindy = ("cindy", 5); - let david = ("david", 15); - let dennis = ("dennis", 15); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener. 
+ let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(31) - .weigher(weigher) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", alice); - cache.insert("b", bob); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - cache.sync(); - // order (LRU -> MRU) and counts: a -> 1, b -> 1 - - cache.insert("c", cindy); - assert_eq_with_mode!(cache.get(&"c"), Some(cindy), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - // order and counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); - // order and counts: c -> 1, a -> 2, b -> 2 - - // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). - // "d" must have higher count than 3, which is the aggregated count - // of "a" and "c". 
- cache.insert("d", david); // count: d -> 0 - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 3 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 4 - - // Finally "d" should be admitted by evicting "c" and "a". - cache.insert("d", dennis); - expected.push((Arc::new("c"), cindy, RemovalCause::Size)); - expected.push((Arc::new("a"), alice, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some(dennis), delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). 
- cache.insert("b", bill); - expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); - expected.push((Arc::new("d"), dennis, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"b"), Some(bill), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). - cache.insert("a", alice); - cache.insert("b", bob); - expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - // Verify the sizes. - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - assert_with_mode!(cache.key_locks_map_is_empty(), delivery_mode); - } + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(31) + .weigher(weigher) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", alice); + cache.insert("b", bob); + assert_eq!(cache.get(&"a"), Some(alice)); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b"), Some(bob)); + cache.run_pending_tasks(); + // order (LRU -> MRU) and counts: a -> 1, b -> 1 + + cache.insert("c", cindy); + assert_eq!(cache.get(&"c"), Some(cindy)); + assert!(cache.contains_key(&"c")); + // order and counts: a -> 1, b -> 1, c -> 1 + cache.run_pending_tasks(); + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a"), Some(alice)); + assert_eq!(cache.get(&"b"), Some(bob)); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); + // order and counts: c -> 1, a -> 2, b -> 2 + + // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). + // "d" must have higher count than 3, which is the aggregated count + // of "a" and "c". + cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 1 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 2 + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 3 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 4 + + // Finally "d" should be admitted by evicting "c" and "a". 
+ cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), Some(bob)); + assert_eq!(cache.get(&"c"), None); + assert_eq!(cache.get(&"d"), Some(dennis)); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). + cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"b"), Some(bill)); + assert_eq!(cache.get(&"d"), None); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"d")); + + // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). + cache.insert("a", alice); + cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), Some(alice)); + assert_eq!(cache.get(&"b"), Some(bob)); + assert_eq!(cache.get(&"d"), None); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"d")); + + // Verify the sizes. + assert_eq!(cache.entry_count(), 2); + assert_eq!(cache.weighted_size(), 25); + + verify_notification_vec(&cache, actual, &expected); + assert!(cache.key_locks_map_is_empty()); } #[test] @@ -2356,359 +2133,322 @@ mod tests { #[test] fn invalidate_all() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. 
- let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - cache.insert("c", "cindy"); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - - // `cache.sync()` is no longer needed here before invalidating. The last - // modified timestamp of the entries were updated when they were inserted. 
- // https://github.com/moka-rs/moka/issues/155 - - cache.invalidate_all(); - expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); - cache.sync(); - - cache.insert("d", "david"); - cache.sync(); - - assert_with_mode!(cache.get(&"a").is_none(), delivery_mode); - assert_with_mode!(cache.get(&"b").is_none(), delivery_mode); - assert_with_mode!(cache.get(&"c").is_none(), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some("david"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + cache.insert("c", "cindy"); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"c"), Some("cindy")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(cache.contains_key(&"c")); + + // `cache.run_pending_tasks()` is no longer needed here before invalidating. The last + // modified timestamp of the entries were updated when they were inserted. 
+ // https://github.com/moka-rs/moka/issues/155 + + cache.invalidate_all(); + expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); + cache.run_pending_tasks(); + + cache.insert("d", "david"); + cache.run_pending_tasks(); + + assert!(cache.get(&"a").is_none()); + assert!(cache.get(&"b").is_none()); + assert!(cache.get(&"c").is_none()); + assert_eq!(cache.get(&"d"), Some("david")); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + verify_notification_vec(&cache, actual, &expected); } #[test] fn invalidate_entries_if() -> Result<(), Box> { - run_test(DeliveryMode::Immediate)?; - run_test(DeliveryMode::Queued)?; + use std::collections::HashSet; - fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { - use std::collections::HashSet; + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. 
- let mut cache = Cache::builder() - .max_capacity(100) - .support_invalidation_closures() - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert(0, "alice"); - cache.insert(1, "bob"); - cache.insert(2, "alex"); - cache.sync(); - - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); - - assert_eq_with_mode!(cache.get(&0), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&2), Some("alex"), delivery_mode); - assert_with_mode!(cache.contains_key(&0), delivery_mode); - assert_with_mode!(cache.contains_key(&1), delivery_mode); - assert_with_mode!(cache.contains_key(&2), delivery_mode); - - let names = ["alice", "alex"].iter().cloned().collect::>(); - cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; - assert_eq_with_mode!(cache.base.invalidation_predicate_count(), 1, delivery_mode); - expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); - expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); - - mock.increment(Duration::from_secs(5)); // 10 secs from the start. - - cache.insert(3, "alice"); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert_with_mode!(cache.get(&0).is_none(), delivery_mode); - assert_with_mode!(cache.get(&2).is_none(), delivery_mode); - assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); - // This should survive as it was inserted after calling invalidate_entries_if. 
- assert_eq_with_mode!(cache.get(&3), Some("alice"), delivery_mode); - - assert_with_mode!(!cache.contains_key(&0), delivery_mode); - assert_with_mode!(cache.contains_key(&1), delivery_mode); - assert_with_mode!(!cache.contains_key(&2), delivery_mode); - assert_with_mode!(cache.contains_key(&3), delivery_mode); - - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - - mock.increment(Duration::from_secs(5)); // 15 secs from the start. - - cache.invalidate_entries_if(|_k, &v| v == "alice")?; - cache.invalidate_entries_if(|_k, &v| v == "bob")?; - assert_eq_with_mode!(cache.invalidation_predicate_count(), 2, delivery_mode); - // key 1 was inserted before key 3. - expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); - expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert_with_mode!(cache.get(&1).is_none(), delivery_mode); - assert_with_mode!(cache.get(&3).is_none(), delivery_mode); - - assert_with_mode!(!cache.contains_key(&1), delivery_mode); - assert_with_mode!(!cache.contains_key(&3), delivery_mode); - - assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); - assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - - Ok(()) - } + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .max_capacity(100) + .support_invalidation_closures() + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert(0, "alice"); + cache.insert(1, "bob"); + cache.insert(2, "alex"); + cache.run_pending_tasks(); + + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); + + assert_eq!(cache.get(&0), Some("alice")); + assert_eq!(cache.get(&1), Some("bob")); + assert_eq!(cache.get(&2), Some("alex")); + assert!(cache.contains_key(&0)); + assert!(cache.contains_key(&1)); + assert!(cache.contains_key(&2)); + + let names = ["alice", "alex"].iter().cloned().collect::>(); + cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; + assert_eq!(cache.base.invalidation_predicate_count(), 1); + expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); + expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); + + mock.increment(Duration::from_secs(5)); // 10 secs from the start. + + cache.insert(3, "alice"); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.run_pending_tasks(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.run_pending_tasks(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert!(cache.get(&0).is_none()); + assert!(cache.get(&2).is_none()); + assert_eq!(cache.get(&1), Some("bob")); + // This should survive as it was inserted after calling invalidate_entries_if. 
+ assert_eq!(cache.get(&3), Some("alice")); + + assert!(!cache.contains_key(&0)); + assert!(cache.contains_key(&1)); + assert!(!cache.contains_key(&2)); + assert!(cache.contains_key(&3)); + + assert_eq!(cache.entry_count(), 2); + assert_eq!(cache.invalidation_predicate_count(), 0); + + mock.increment(Duration::from_secs(5)); // 15 secs from the start. + + cache.invalidate_entries_if(|_k, &v| v == "alice")?; + cache.invalidate_entries_if(|_k, &v| v == "bob")?; + assert_eq!(cache.invalidation_predicate_count(), 2); + // key 1 was inserted before key 3. + expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); + expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.run_pending_tasks(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.run_pending_tasks(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert!(cache.get(&1).is_none()); + assert!(cache.get(&3).is_none()); + + assert!(!cache.contains_key(&1)); + assert!(!cache.contains_key(&3)); + + assert_eq!(cache.entry_count(), 0); + assert_eq!(cache.invalidation_predicate_count(), 0); + + verify_notification_vec(&cache, actual, &expected); Ok(()) } #[test] fn time_to_live() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create an eviction listener. 
- let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .time_to_live(Duration::from_secs(10)) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .time_to_live(Duration::from_secs(10)) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. + let cache = cache; - cache.insert("a", "alice"); - cache.sync(); + cache.insert("a", "alice"); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq!(cache.get(&"a"), Some("alice")); + assert!(cache.contains_key(&"a")); - mock.increment(Duration::from_secs(5)); // 10 secs. - expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + mock.increment(Duration::from_secs(5)); // 10 secs. 
+ expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); + assert!(!cache.contains_key(&"a")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + assert!(cache.is_table_empty()); - cache.insert("b", "bob"); - cache.sync(); + cache.insert("b", "bob"); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(5)); // 15 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 15 secs. + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.entry_count(), 1); - cache.insert("b", "bill"); - expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); - cache.sync(); + cache.insert("b", "bill"); + expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 20 secs - cache.sync(); + mock.increment(Duration::from_secs(5)); // 20 secs + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"b"), Some("bill"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.get(&"b"), Some("bill")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(5)); // 25 secs - expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); + mock.increment(Duration::from_secs(5)); // 25 secs + expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); - 
assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + assert!(cache.is_table_empty()); - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + verify_notification_vec(&cache, actual, &expected); } #[test] fn time_to_idle() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .time_to_idle(Duration::from_secs(10)) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .max_capacity(100) + .time_to_idle(Duration::from_secs(10)) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. + let cache = cache; - cache.insert("a", "alice"); - cache.sync(); + cache.insert("a", "alice"); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); + assert_eq!(cache.get(&"a"), Some("alice")); - mock.increment(Duration::from_secs(5)); // 10 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 10 secs. + cache.run_pending_tasks(); - cache.insert("b", "bob"); - cache.sync(); + cache.insert("b", "bob"); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq!(cache.entry_count(), 2); - mock.increment(Duration::from_secs(2)); // 12 secs. - cache.sync(); + mock.increment(Duration::from_secs(2)); // 12 secs. + cache.run_pending_tasks(); - // contains_key does not reset the idle timer for the key. - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); + // contains_key does not reset the idle timer for the key. + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq!(cache.entry_count(), 2); - mock.increment(Duration::from_secs(3)); // 15 secs. 
- expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + mock.increment(Duration::from_secs(3)); // 15 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 1, delivery_mode); + assert_eq!(cache.iter().count(), 1); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(10)); // 25 secs - expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + mock.increment(Duration::from_secs(10)); // 25 secs + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + assert!(cache.is_table_empty()); - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + verify_notification_vec(&cache, actual, &expected); } #[test] fn time_to_live_by_expiry_type() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - // Define an expiry type. 
struct MyExpiry { counters: Arc, @@ -2743,104 +2483,96 @@ mod tests { } } - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create expiry counters and the expiry. - let expiry_counters = Arc::new(ExpiryCallCounters::default()); - let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); + // Create expiry counters and the expiry. + let expiry_counters = Arc::new(ExpiryCallCounters::default()); + let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .expire_after(expiry) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(100) + .expire_after(expiry) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. 
+ let cache = cache; - cache.insert("a", "alice"); - expiry_counters.incl_expected_creations(); - cache.sync(); + cache.insert("a", "alice"); + expiry_counters.incl_expected_creations(); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); + assert_eq!(cache.get(&"a"), Some("alice")); + assert!(cache.contains_key(&"a")); - mock.increment(Duration::from_secs(5)); // 10 secs. - expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); + mock.increment(Duration::from_secs(5)); // 10 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + assert_eq!(cache.get(&"a"), None); + assert!(!cache.contains_key(&"a")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + assert!(cache.is_table_empty()); - cache.insert("b", "bob"); - expiry_counters.incl_expected_creations(); - cache.sync(); + cache.insert("b", "bob"); + expiry_counters.incl_expected_creations(); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(5)); // 15 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 15 secs. 
+ cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.entry_count(), 1); - cache.insert("b", "bill"); - expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); - expiry_counters.incl_expected_updates(); - cache.sync(); + cache.insert("b", "bill"); + expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); + expiry_counters.incl_expected_updates(); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 20 secs - cache.sync(); + mock.increment(Duration::from_secs(5)); // 20 secs + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"b"), Some("bill"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + assert_eq!(cache.get(&"b"), Some("bill")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(5)); // 25 secs - expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); + mock.increment(Duration::from_secs(5)); // 25 secs + expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + 
assert!(cache.is_table_empty()); - expiry_counters.verify(); - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + expiry_counters.verify(); + verify_notification_vec(&cache, actual, &expected); } #[test] fn time_to_idle_by_expiry_type() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - // Define an expiry type. struct MyExpiry { counters: Arc, @@ -2866,93 +2598,88 @@ mod tests { } } - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - // Create expiry counters and the expiry. - let expiry_counters = Arc::new(ExpiryCallCounters::default()); - let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); + // Create expiry counters and the expiry. + let expiry_counters = Arc::new(ExpiryCallCounters::default()); + let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(100) - .expire_after(expiry) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .max_capacity(100) + .expire_after(expiry) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); - let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)); - // Make the cache exterior immutable. - let cache = cache; + // Make the cache exterior immutable. + let cache = cache; - cache.insert("a", "alice"); - cache.sync(); + cache.insert("a", "alice"); + cache.run_pending_tasks(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - expiry_counters.incl_expected_reads(); + assert_eq!(cache.get(&"a"), Some("alice")); + expiry_counters.incl_expected_reads(); - mock.increment(Duration::from_secs(5)); // 10 secs. - cache.sync(); + mock.increment(Duration::from_secs(5)); // 10 secs. + cache.run_pending_tasks(); - cache.insert("b", "bob"); - cache.sync(); + cache.insert("b", "bob"); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq!(cache.entry_count(), 2); - mock.increment(Duration::from_secs(2)); // 12 secs. - cache.sync(); + mock.increment(Duration::from_secs(2)); // 12 secs. + cache.run_pending_tasks(); - // contains_key does not reset the idle timer for the key. - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); + // contains_key does not reset the idle timer for the key. + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); + assert_eq!(cache.entry_count(), 2); - mock.increment(Duration::from_secs(3)); // 15 secs. 
- expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); + mock.increment(Duration::from_secs(3)); // 15 secs. + expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - expiry_counters.incl_expected_reads(); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), Some("bob")); + expiry_counters.incl_expected_reads(); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 1, delivery_mode); + assert_eq!(cache.iter().count(), 1); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 1, delivery_mode); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 1); - mock.increment(Duration::from_secs(10)); // 25 secs - expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); + mock.increment(Duration::from_secs(10)); // 25 secs + expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); - assert_eq_with_mode!(cache.iter().count(), 0, delivery_mode); + assert_eq!(cache.iter().count(), 0); - cache.sync(); - assert_with_mode!(cache.is_table_empty(), delivery_mode); + cache.run_pending_tasks(); + assert!(cache.is_table_empty()); - expiry_counters.verify(); - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + expiry_counters.verify(); + verify_notification_vec(&cache, actual, &expected); } /// 
Verify that the `Expiry::expire_after_read()` method is called in `get_with` @@ -3021,18 +2748,18 @@ mod tests { // The key is not present. cache.get_with("a", || "alice"); expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks(); // The key is present. cache.get_with("a", || "alex"); expiry_counters.incl_expected_reads(); - cache.sync(); + cache.run_pending_tasks(); // The key is not present. cache.invalidate("a"); cache.get_with("a", || "amanda"); expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks(); expiry_counters.verify(); } @@ -4167,69 +3894,61 @@ mod tests { #[test] fn test_removal_notifications() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(3) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert('a', "alice"); - cache.invalidate(&'a'); - expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); - - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); - - cache.insert('b', "bob"); - cache.insert('c', "cathy"); - cache.insert('d', "david"); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - - // This will be rejected due to the size constraint. - cache.insert('e', "emily"); - expected.push((Arc::new('e'), "emily", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - - // Raise the popularity of 'e' so it will be accepted next time. - cache.get(&'e'); - cache.sync(); - - // Retry. - cache.insert('e', "eliza"); - // and the LRU entry will be evicted. - expected.push((Arc::new('b'), "bob", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - - // Replace an existing entry. - cache.insert('d', "dennis"); - expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); - cache.sync(); - assert_eq_with_mode!(cache.entry_count(), 3, delivery_mode); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert('a', "alice"); + cache.invalidate(&'a'); + expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); + + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 0); + + cache.insert('b', "bob"); + cache.insert('c', "cathy"); + cache.insert('d', "david"); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 3); + + // This will be rejected due to the size constraint. 
+ cache.insert('e', "emily"); + expected.push((Arc::new('e'), "emily", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 3); + + // Raise the popularity of 'e' so it will be accepted next time. + cache.get(&'e'); + cache.run_pending_tasks(); + + // Retry. + cache.insert('e', "eliza"); + // and the LRU entry will be evicted. + expected.push((Arc::new('b'), "bob", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 3); + + // Replace an existing entry. + cache.insert('d', "dennis"); + expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); + cache.run_pending_tasks(); + assert_eq!(cache.entry_count(), 3); + + verify_notification_vec(&cache, actual, &expected); } #[test] @@ -4240,13 +3959,10 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Immediate) - .build(); // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener_with_conf(listener, listener_conf) + .eviction_listener(listener) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); @@ -4259,7 +3975,7 @@ mod tests { let cache = cache; cache.insert("alice", "a0"); - cache.sync(); + cache.run_pending_tasks(); // Now alice (a0) has been expired by the idle timeout (TTI). mock.increment(Duration::from_secs(6)); @@ -4281,11 +3997,11 @@ mod tests { a.clear(); } - cache.sync(); + cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), Some("a1")); - cache.sync(); + cache.run_pending_tasks(); // Now alice has been expired by time-to-live (TTL). 
mock.increment(Duration::from_secs(4)); @@ -4304,7 +4020,7 @@ mod tests { a.clear(); } - cache.sync(); + cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); @@ -4315,7 +4031,7 @@ mod tests { // This invalidate will internally remove alice (a2). cache.invalidate(&"alice"); - cache.sync(); + cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 0); { @@ -4327,17 +4043,17 @@ mod tests { // Re-insert, and this time, make it expired by the TTL. cache.insert("alice", "a3"); - cache.sync(); + cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), Some("a3")); - cache.sync(); + cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), None); assert_eq!(cache.entry_count(), 1); // This invalidate will internally remove alice (a2). cache.invalidate(&"alice"); - cache.sync(); + cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 0); @@ -4387,13 +4103,10 @@ mod tests { sleep(Duration::from_millis(300)); a0.lock().push(Event::EndNotify(v, cause)); }; - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Immediate) - .build(); // Create a cache with the eviction listener and also TTL 500 ms. let mut cache = Cache::builder() - .eviction_listener_with_conf(listener, listener_conf) + .eviction_listener(listener) .time_to_live(Duration::from_millis(500)) .build(); cache.reconfigure_for_testing(); @@ -4434,7 +4147,7 @@ mod tests { cache.insert(KEY, "a0"); // Call `sync` to set the last modified for the KEY immediately so that // this entry should expire in 1000 ms from now. - cache.sync(); + cache.run_pending_tasks(); // 0500: Insert value a1 -> expired a0 (N-A0) let thread1 = { @@ -4493,58 +4206,50 @@ mod tests { #[cfg(feature = "logging")] let _ = env_logger::builder().is_test(true).try_init(); - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); + // The following `Vec`s will hold actual and expected notifications. 
+ let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); + // Create an eviction listener that panics when it sees + // a value "panic now!". + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + if v == "panic now!" { + panic!("Panic now!"); + } + a1.lock().push((k, v, cause)) + }; - // Create an eviction listener that panics when it see - // a value "panic now!". - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - if v == "panic now!" { - panic!("Panic now!"); - } - a1.lock().push((k, v, cause)) - }; - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .name("My Sync Cache") - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - // Insert an okay value. - cache.insert("alice", "a0"); - cache.sync(); - - // Insert a value that will cause the eviction listener to panic. - cache.insert("alice", "panic now!"); - expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); - cache.sync(); - - // Insert an okay value. This will replace the previous - // value "panic now!" so the eviction listener will panic. - cache.insert("alice", "a2"); - cache.sync(); - // No more removal notification should be sent. - - // Invalidate the okay value. - cache.invalidate(&"alice"); - cache.sync(); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - } + // Create a cache with the eviction listener. 
+ let mut cache = Cache::builder() + .name("My Sync Cache") + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + // Insert an okay value. + cache.insert("alice", "a0"); + cache.run_pending_tasks(); + + // Insert a value that will cause the eviction listener to panic. + cache.insert("alice", "panic now!"); + expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); + cache.run_pending_tasks(); + + // Insert an okay value. This will replace the previous + // value "panic now!" so the eviction listener will panic. + cache.insert("alice", "a2"); + cache.run_pending_tasks(); + // No more removal notification should be sent. + + // Invalidate the okay value. + cache.invalidate(&"alice"); + cache.run_pending_tasks(); + + verify_notification_vec(&cache, actual, &expected); } // This test ensures that the `contains_key`, `get` and `invalidate` can use @@ -4604,12 +4309,12 @@ mod tests { let value = Arc::new(Value::new(vec![0u8; 1024], &counters)); cache.insert(key, value); counters.incl_inserted(); - cache.sync(); + cache.run_pending_tasks(); } let eviction_count = KEYS - MAX_CAPACITY; - cache.sync(); + cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); @@ -4618,10 +4323,10 @@ mod tests { for key in 0..KEYS { cache.invalidate(&key); - cache.sync(); + cache.run_pending_tasks(); } - cache.sync(); + cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); @@ -4632,70 +4337,6 @@ mod tests { assert_eq!(counters.value_dropped(), KEYS, "value_dropped"); } - // Ignored by default. This test cannot run in parallel with other tests. 
- #[test] - #[ignore] - fn enabling_and_disabling_thread_pools() { - use crate::common::concurrent::thread_pool::{PoolName::*, ThreadPoolRegistry}; - - // Enable the housekeeper pool. - { - let cache = Cache::builder().thread_pool_enabled(true).build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper]); - } - - // Enable the housekeeper and invalidator pools. - { - let cache = Cache::builder() - .thread_pool_enabled(true) - .support_invalidation_closures() - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper, Invalidator]); - } - - // Queued delivery mode: Enable the housekeeper and removal notifier pools. - { - let listener = |_k, _v, _cause| {}; - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Queued) - .build(); - let cache = Cache::builder() - .thread_pool_enabled(true) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper, RemovalNotifier]); - } - - // Immediate delivery mode: Enable only the housekeeper pool. - { - let listener = |_k, _v, _cause| {}; - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Immediate) - .build(); - let cache = Cache::builder() - .thread_pool_enabled(true) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper]); - } - - // Disable all pools. 
- { - let cache = Cache::builder().thread_pool_enabled(false).build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert!(enabled_pools.is_empty()); - } - } - #[test] fn test_debug_format() { let cache = Cache::new(10); @@ -4717,7 +4358,6 @@ mod tests { cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], - delivery_mode: DeliveryMode, ) where K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, @@ -4728,7 +4368,7 @@ mod tests { let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. - cache.sync(); + cache.run_pending_tasks(); std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock(); @@ -4737,21 +4377,12 @@ mod tests { retries += 1; continue; } else { - assert_eq!( - actual.len(), - expected.len(), - "Retries exhausted (delivery mode: {:?})", - delivery_mode - ); + assert_eq!(actual.len(), expected.len(), "Retries exhausted",); } } for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); + assert_eq!(actual, expected, "expected[{}]", i,); } break; diff --git a/src/sync/segment.rs b/src/sync/segment.rs index c1728ad3..937f82b5 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -1,9 +1,7 @@ -use super::{ - cache::Cache, CacheBuilder, ConcurrentCacheExt, OwnedKeyEntrySelector, RefKeyEntrySelector, -}; +use super::{cache::Cache, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector}; use crate::{ - common::concurrent::{housekeeper, Weigher}, - notification::{self, EvictionListener}, + common::concurrent::Weigher, + notification::EvictionListener, policy::ExpirationPolicy, sync_base::iter::{Iter, ScanningGet}, Entry, Policy, PredicateError, @@ -105,10 +103,8 @@ where build_hasher, None, None, - None, Default::default(), false, - 
housekeeper::Configuration::new_thread_pool(true), ) } @@ -162,11 +158,9 @@ impl SegmentedCache { /// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// - /// // To mitigate the inaccuracy, bring `ConcurrentCacheExt` trait to - /// // the scope so we can use `sync` method. - /// use moka::sync::ConcurrentCacheExt; - /// // Call `sync` to run pending internal tasks. - /// cache.sync(); + /// // To mitigate the inaccuracy, call `run_pending_tasks` method to run + /// // pending internal tasks. + /// cache.run_pending_tasks(); /// /// // Followings will print the actual numbers. /// println!("{}", cache.entry_count()); // -> 3 @@ -214,10 +208,8 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - housekeeper_conf: housekeeper::Configuration, ) -> Self { Self { inner: Arc::new(Inner::new( @@ -228,10 +220,8 @@ where build_hasher, weigher, eviction_listener, - eviction_listener_conf, expiration_policy, invalidator_enabled, - housekeeper_conf, )), } } @@ -613,6 +603,13 @@ where Iter::with_multiple_cache_segments(segments, num_cht_segments) } + /// Performs any pending maintenance operations needed by the cache. + pub fn run_pending_tasks(&self) { + for segment in self.inner.segments.iter() { + segment.run_pending_tasks(); + } + } + // /// This is used by unit tests to get consistent result. // #[cfg(test)] // pub(crate) fn reconfigure_for_testing(&mut self) { @@ -638,19 +635,6 @@ where } } -impl ConcurrentCacheExt for SegmentedCache -where - K: Hash + Eq + Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - S: BuildHasher + Clone + Send + Sync + 'static, -{ - fn sync(&self) { - for segment in self.inner.segments.iter() { - segment.sync(); - } - } -} - // For unit tests. 
#[cfg(test)] impl SegmentedCache @@ -737,10 +721,8 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - housekeeper_conf: housekeeper::Configuration, ) -> Self { assert!(num_segments > 0); @@ -761,10 +743,8 @@ where build_hasher.clone(), weigher.as_ref().map(Arc::clone), eviction_listener.as_ref().map(Arc::clone), - eviction_listener_conf.clone(), expiration_policy.clone(), invalidator_enabled, - housekeeper_conf.clone(), ) }) .collect::>(); @@ -806,12 +786,8 @@ where #[cfg(test)] mod tests { - use super::{ConcurrentCacheExt, SegmentedCache}; - use crate::notification::{ - self, - macros::{assert_eq_with_mode, assert_with_mode}, - DeliveryMode, RemovalCause, - }; + use super::SegmentedCache; + use crate::notification::RemovalCause; use parking_lot::Mutex; use std::{sync::Arc, time::Duration}; @@ -827,7 +803,7 @@ mod tests { assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); - cache.sync(); + cache.run_pending_tasks(); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); assert_eq!(cache.entry_count(), 0) @@ -835,96 +811,88 @@ mod tests { #[test] fn basic_single_thread() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - - fn run_test(delivery_mode: DeliveryMode) { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. 
- let mut cache = SegmentedCache::builder(1) - .max_capacity(3) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - cache.sync(); - // counts: a -> 1, b -> 1 - - cache.insert("c", "cindy"); - assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); - // counts: a -> 2, b -> 2, c -> 1 - - // "d" should not be admitted because its frequency is too low. - cache.insert("d", "david"); // count: d -> 0 - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", "david"); - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 - - // "d" should be admitted and "c" should be evicted - // because d's frequency is higher than c's. 
- cache.insert("d", "dennis"); - expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some("dennis"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - cache.invalidate(&"b"); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"b"), None, delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - - assert!(cache.remove(&"b").is_none()); - assert_eq!(cache.remove(&"d"), Some("dennis")); - expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); - assert!(!cache.contains_key(&"d")); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - assert_with_mode!(cache.key_locks_map_is_empty(), delivery_mode); - } + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(1) + .max_capacity(3) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + assert_eq!(cache.get(&"a"), Some("alice")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b"), Some("bob")); + cache.run_pending_tasks(); + // counts: a -> 1, b -> 1 + + cache.insert("c", "cindy"); + assert_eq!(cache.get(&"c"), Some("cindy")); + assert!(cache.contains_key(&"c")); + // counts: a -> 1, b -> 1, c -> 1 + cache.run_pending_tasks(); + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); + // counts: a -> 2, b -> 2, c -> 1 + + // "d" should not be admitted because its frequency is too low. + cache.insert("d", "david"); // count: d -> 0 + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 1 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", "david"); + expected.push((Arc::new("d"), "david", RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 2 + + // "d" should be admitted and "c" should be evicted + // because d's frequency is higher than c's. 
+ cache.insert("d", "dennis"); + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"c"), None); + assert_eq!(cache.get(&"d"), Some("dennis")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"b")); + + assert!(cache.remove(&"b").is_none()); + assert_eq!(cache.remove(&"d"), Some("dennis")); + expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); + assert!(!cache.contains_key(&"d")); + + verify_notification_vec(&cache, actual, &expected); + assert!(cache.key_locks_map_is_empty()); } #[test] @@ -942,139 +910,131 @@ mod tests { cache.insert("c", "cindy"); assert_eq!(cache.iter().count(), 3); - cache.sync(); + cache.run_pending_tasks(); assert_eq!(cache.iter().count(), 3); } #[test] fn size_aware_eviction() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - - fn run_test(delivery_mode: DeliveryMode) { - let weigher = |_k: &&str, v: &(&str, u32)| v.1; - - let alice = ("alice", 10); - let bob = ("bob", 15); - let bill = ("bill", 20); - let cindy = ("cindy", 5); - let david = ("david", 15); - let dennis = ("dennis", 15); - - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. 
- let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = SegmentedCache::builder(1) - .max_capacity(31) - .weigher(weigher) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", alice); - cache.insert("b", bob); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - cache.sync(); - // order (LRU -> MRU) and counts: a -> 1, b -> 1 - - cache.insert("c", cindy); - assert_eq_with_mode!(cache.get(&"c"), Some(cindy), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - // order and counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - cache.sync(); - // order and counts: c -> 1, a -> 2, b -> 2 - - // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). - // "d" must have higher count than 3, which is the aggregated count - // of "a" and "c". 
- cache.insert("d", david); // count: d -> 0 - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 1 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 2 - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 3 - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - cache.insert("d", david); - expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); // d -> 4 - - // Finally "d" should be admitted by evicting "c" and "a". - cache.insert("d", dennis); - expected.push((Arc::new("c"), cindy, RemovalCause::Size)); - expected.push((Arc::new("a"), alice, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), None, delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some(dennis), delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). 
- cache.insert("b", bill); - expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); - expected.push((Arc::new("d"), dennis, RemovalCause::Size)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"b"), Some(bill), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). - cache.insert("a", alice); - cache.insert("b", bob); - expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); - cache.sync(); - assert_eq_with_mode!(cache.get(&"a"), Some(alice), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some(bob), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), None, delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"d"), delivery_mode); - - // Verify the sizes. - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - assert_eq_with_mode!(cache.weighted_size(), 25, delivery_mode); - - verify_notification_vec(&cache, actual, &expected, delivery_mode); - assert_with_mode!(cache.key_locks_map_is_empty(), delivery_mode); - } + let weigher = |_k: &&str, v: &(&str, u32)| v.1; + + let alice = ("alice", 10); + let bob = ("bob", 15); + let bill = ("bill", 20); + let cindy = ("cindy", 5); + let david = ("david", 15); + let dennis = ("dennis", 15); + + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. 
+ let mut cache = SegmentedCache::builder(1) + .max_capacity(31) + .weigher(weigher) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert("a", alice); + cache.insert("b", bob); + assert_eq!(cache.get(&"a"), Some(alice)); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b"), Some(bob)); + cache.run_pending_tasks(); + // order (LRU -> MRU) and counts: a -> 1, b -> 1 + + cache.insert("c", cindy); + assert_eq!(cache.get(&"c"), Some(cindy)); + assert!(cache.contains_key(&"c")); + // order and counts: a -> 1, b -> 1, c -> 1 + cache.run_pending_tasks(); + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a"), Some(alice)); + assert_eq!(cache.get(&"b"), Some(bob)); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); + // order and counts: c -> 1, a -> 2, b -> 2 + + // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). + // "d" must have higher count than 3, which is the aggregated count + // of "a" and "c". 
+ cache.insert("d", david); // count: d -> 0 + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 1 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 2 + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"d"), None); // d -> 3 + assert!(!cache.contains_key(&"d")); + + cache.insert("d", david); + expected.push((Arc::new("d"), david, RemovalCause::Size)); + cache.run_pending_tasks(); + assert!(!cache.contains_key(&"d")); + assert_eq!(cache.get(&"d"), None); // d -> 4 + + // Finally "d" should be admitted by evicting "c" and "a". + cache.insert("d", dennis); + expected.push((Arc::new("c"), cindy, RemovalCause::Size)); + expected.push((Arc::new("a"), alice, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"b"), Some(bob)); + assert_eq!(cache.get(&"c"), None); + assert_eq!(cache.get(&"d"), Some(dennis)); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). + cache.insert("b", bill); + expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); + expected.push((Arc::new("d"), dennis, RemovalCause::Size)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"b"), Some(bill)); + assert_eq!(cache.get(&"d"), None); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"d")); + + // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). 
+ cache.insert("a", alice); + cache.insert("b", bob); + expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); + cache.run_pending_tasks(); + assert_eq!(cache.get(&"a"), Some(alice)); + assert_eq!(cache.get(&"b"), Some(bob)); + assert_eq!(cache.get(&"d"), None); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"d")); + + // Verify the sizes. + assert_eq!(cache.entry_count(), 2); + assert_eq!(cache.weighted_size(), 25); + + verify_notification_vec(&cache, actual, &expected); + assert!(cache.key_locks_map_is_empty()); } #[test] @@ -1095,7 +1055,7 @@ mod tests { std::thread::spawn(move || { cache.insert(10, format!("{}-100", id)); cache.get(&10); - cache.sync(); + cache.run_pending_tasks(); cache.insert(20, format!("{}-200", id)); cache.invalidate(&10); }) @@ -1104,7 +1064,7 @@ mod tests { handles.into_iter().for_each(|h| h.join().expect("Failed")); - cache.sync(); + cache.run_pending_tasks(); assert!(cache.get(&10).is_none()); assert!(cache.get(&20).is_some()); @@ -1114,188 +1074,162 @@ mod tests { #[test] fn invalidate_all() { - run_test(DeliveryMode::Immediate); - run_test(DeliveryMode::Queued); - - fn run_test(delivery_mode: DeliveryMode) { - use std::collections::HashMap; - - // The following `HashMap`s will hold actual and expected notifications. - // Note: We use `HashMap` here as the order of invalidations is non-deterministic. - let actual = Arc::new(Mutex::new(HashMap::new())); - let mut expected = HashMap::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - a1.lock().insert(k, (v, cause)); - }; - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. 
- let mut cache = SegmentedCache::builder(4) - .max_capacity(100) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.insert("a", "alice"); - cache.insert("b", "bob"); - cache.insert("c", "cindy"); - assert_eq_with_mode!(cache.get(&"a"), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&"b"), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&"c"), Some("cindy"), delivery_mode); - assert_with_mode!(cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(cache.contains_key(&"c"), delivery_mode); - - // `cache.sync()` is no longer needed here before invalidating. The last - // modified timestamp of the entries were updated when they were inserted. - // https://github.com/moka-rs/moka/issues/155 - - cache.invalidate_all(); - expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); - expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); - expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); - cache.sync(); - - cache.insert("d", "david"); - cache.sync(); - - assert_with_mode!(cache.get(&"a").is_none(), delivery_mode); - assert_with_mode!(cache.get(&"b").is_none(), delivery_mode); - assert_with_mode!(cache.get(&"c").is_none(), delivery_mode); - assert_eq_with_mode!(cache.get(&"d"), Some("david"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"a"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"b"), delivery_mode); - assert_with_mode!(!cache.contains_key(&"c"), delivery_mode); - assert_with_mode!(cache.contains_key(&"d"), delivery_mode); - - verify_notification_map(&cache, actual, &expected, delivery_mode); - } + use std::collections::HashMap; + + // The following `HashMap`s will hold actual and expected notifications. 
+ // Note: We use `HashMap` here as the order of invalidations is non-deterministic.
+ let actual = Arc::new(Mutex::new(HashMap::new()));
+ let mut expected = HashMap::new();
+
+ // Create an eviction listener.
+ let a1 = Arc::clone(&actual);
+ let listener = move |k, v, cause| {
+ a1.lock().insert(k, (v, cause));
+ };
+
+ // Create a cache with the eviction listener.
+ let mut cache = SegmentedCache::builder(4)
+ .max_capacity(100)
+ .eviction_listener(listener)
+ .build();
+ cache.reconfigure_for_testing();
+
+ // Make the cache exterior immutable.
+ let cache = cache;
+
+ cache.insert("a", "alice");
+ cache.insert("b", "bob");
+ cache.insert("c", "cindy");
+ assert_eq!(cache.get(&"a"), Some("alice"));
+ assert_eq!(cache.get(&"b"), Some("bob"));
+ assert_eq!(cache.get(&"c"), Some("cindy"));
+ assert!(cache.contains_key(&"a"));
+ assert!(cache.contains_key(&"b"));
+ assert!(cache.contains_key(&"c"));
+
+ // `cache.run_pending_tasks()` is no longer needed here before invalidating. The last
+ // modified timestamps of the entries were updated when they were inserted. 
+ // https://github.com/moka-rs/moka/issues/155 + + cache.invalidate_all(); + expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); + cache.run_pending_tasks(); + + cache.insert("d", "david"); + cache.run_pending_tasks(); + + assert!(cache.get(&"a").is_none()); + assert!(cache.get(&"b").is_none()); + assert!(cache.get(&"c").is_none()); + assert_eq!(cache.get(&"d"), Some("david")); + assert!(!cache.contains_key(&"a")); + assert!(!cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + + verify_notification_map(&cache, actual, &expected); } #[test] fn invalidate_entries_if() -> Result<(), Box> { - run_test(DeliveryMode::Immediate)?; - run_test(DeliveryMode::Queued)?; - - fn run_test(delivery_mode: DeliveryMode) -> Result<(), Box> { - use std::collections::{HashMap, HashSet}; - - const SEGMENTS: usize = 4; - - // The following `HashMap`s will hold actual and expected notifications. - // Note: We use `HashMap` here as the order of invalidations is non-deterministic. - let actual = Arc::new(Mutex::new(HashMap::new())); - let mut expected = HashMap::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - a1.lock().insert(k, (v, cause)); - }; - let listener_conf = notification::Configuration::builder() - .delivery_mode(delivery_mode) - .build(); - - // Create a cache with the eviction listener. - let mut cache = SegmentedCache::builder(SEGMENTS) - .max_capacity(100) - .support_invalidation_closures() - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.reconfigure_for_testing(); - - let mut mock = cache.create_mock_expiration_clock(); - - // Make the cache exterior immutable. 
- let cache = cache; - - cache.insert(0, "alice"); - cache.insert(1, "bob"); - cache.insert(2, "alex"); - cache.sync(); - mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); - - assert_eq_with_mode!(cache.get(&0), Some("alice"), delivery_mode); - assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); - assert_eq_with_mode!(cache.get(&2), Some("alex"), delivery_mode); - assert_with_mode!(cache.contains_key(&0), delivery_mode); - assert_with_mode!(cache.contains_key(&1), delivery_mode); - assert_with_mode!(cache.contains_key(&2), delivery_mode); - - let names = ["alice", "alex"].iter().cloned().collect::>(); - cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; - assert_eq_with_mode!( - cache.invalidation_predicate_count(), - SEGMENTS, - delivery_mode - ); - expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); - expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); - - mock.increment(Duration::from_secs(5)); // 10 secs from the start. - - cache.insert(3, "alice"); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert_with_mode!(cache.get(&0).is_none(), delivery_mode); - assert_with_mode!(cache.get(&2).is_none(), delivery_mode); - assert_eq_with_mode!(cache.get(&1), Some("bob"), delivery_mode); - // This should survive as it was inserted after calling invalidate_entries_if. 
- assert_eq_with_mode!(cache.get(&3), Some("alice"), delivery_mode); - - assert_with_mode!(!cache.contains_key(&0), delivery_mode); - assert_with_mode!(cache.contains_key(&1), delivery_mode); - assert_with_mode!(!cache.contains_key(&2), delivery_mode); - assert_with_mode!(cache.contains_key(&3), delivery_mode); - - assert_eq_with_mode!(cache.entry_count(), 2, delivery_mode); - assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - - mock.increment(Duration::from_secs(5)); // 15 secs from the start. - - cache.invalidate_entries_if(|_k, &v| v == "alice")?; - cache.invalidate_entries_if(|_k, &v| v == "bob")?; - assert_eq_with_mode!( - cache.invalidation_predicate_count(), - SEGMENTS * 2, - delivery_mode - ); - expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); - expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); - - // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. - std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. - std::thread::sleep(Duration::from_millis(200)); - - assert_with_mode!(cache.get(&1).is_none(), delivery_mode); - assert_with_mode!(cache.get(&3).is_none(), delivery_mode); - - assert_with_mode!(!cache.contains_key(&1), delivery_mode); - assert_with_mode!(!cache.contains_key(&3), delivery_mode); - - assert_eq_with_mode!(cache.entry_count(), 0, delivery_mode); - assert_eq_with_mode!(cache.invalidation_predicate_count(), 0, delivery_mode); - - verify_notification_map(&cache, actual, &expected, delivery_mode); - - Ok(()) - } + use std::collections::{HashMap, HashSet}; + + const SEGMENTS: usize = 4; + + // The following `HashMap`s will hold actual and expected notifications. + // Note: We use `HashMap` here as the order of invalidations is non-deterministic. 
+ let actual = Arc::new(Mutex::new(HashMap::new())); + let mut expected = HashMap::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| { + a1.lock().insert(k, (v, cause)); + }; + + // Create a cache with the eviction listener. + let mut cache = SegmentedCache::builder(SEGMENTS) + .max_capacity(100) + .support_invalidation_closures() + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + let mut mock = cache.create_mock_expiration_clock(); + + // Make the cache exterior immutable. + let cache = cache; + + cache.insert(0, "alice"); + cache.insert(1, "bob"); + cache.insert(2, "alex"); + cache.run_pending_tasks(); + mock.increment(Duration::from_secs(5)); // 5 secs from the start. + cache.run_pending_tasks(); + + assert_eq!(cache.get(&0), Some("alice")); + assert_eq!(cache.get(&1), Some("bob")); + assert_eq!(cache.get(&2), Some("alex")); + assert!(cache.contains_key(&0)); + assert!(cache.contains_key(&1)); + assert!(cache.contains_key(&2)); + + let names = ["alice", "alex"].iter().cloned().collect::>(); + cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; + assert_eq!(cache.invalidation_predicate_count(), SEGMENTS,); + expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); + expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); + + mock.increment(Duration::from_secs(5)); // 10 secs from the start. + + cache.insert(3, "alice"); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.run_pending_tasks(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.run_pending_tasks(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert!(cache.get(&0).is_none()); + assert!(cache.get(&2).is_none()); + assert_eq!(cache.get(&1), Some("bob")); + // This should survive as it was inserted after calling invalidate_entries_if. 
+ assert_eq!(cache.get(&3), Some("alice")); + + assert!(!cache.contains_key(&0)); + assert!(cache.contains_key(&1)); + assert!(!cache.contains_key(&2)); + assert!(cache.contains_key(&3)); + + assert_eq!(cache.entry_count(), 2); + assert_eq!(cache.invalidation_predicate_count(), 0); + + mock.increment(Duration::from_secs(5)); // 15 secs from the start. + + cache.invalidate_entries_if(|_k, &v| v == "alice")?; + cache.invalidate_entries_if(|_k, &v| v == "bob")?; + assert_eq!(cache.invalidation_predicate_count(), SEGMENTS * 2,); + expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); + expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); + + // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) + cache.run_pending_tasks(); // To submit the invalidation task. + std::thread::sleep(Duration::from_millis(200)); + cache.run_pending_tasks(); // To process the task result. + std::thread::sleep(Duration::from_millis(200)); + + assert!(cache.get(&1).is_none()); + assert!(cache.get(&3).is_none()); + + assert!(!cache.contains_key(&1)); + assert!(!cache.contains_key(&3)); + + assert_eq!(cache.entry_count(), 0); + assert_eq!(cache.invalidation_predicate_count(), 0); + + verify_notification_map(&cache, actual, &expected); Ok(()) } @@ -1939,12 +1873,12 @@ mod tests { let value = Arc::new(Value::new(vec![0u8; 1024], &counters)); cache.insert(key, value); counters.incl_inserted(); - cache.sync(); + cache.run_pending_tasks(); } let eviction_count = KEYS - MAX_CAPACITY; - cache.sync(); + cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); @@ -1953,10 +1887,10 @@ mod tests { for key in 0..KEYS { cache.invalidate(&key); - cache.sync(); + cache.run_pending_tasks(); } - cache.sync(); + cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); 
assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); @@ -1967,76 +1901,6 @@ mod tests { assert_eq!(counters.value_dropped(), KEYS, "value_dropped"); } - // Ignored by default. This test cannot run in parallel with other tests. - #[test] - #[ignore] - fn enabling_and_disabling_thread_pools() { - use crate::common::concurrent::thread_pool::{PoolName::*, ThreadPoolRegistry}; - - const NUM_SEGMENTS: usize = 4; - - // Enable the housekeeper pool. - { - let cache = SegmentedCache::builder(NUM_SEGMENTS) - .thread_pool_enabled(true) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper]); - } - - // Enable the housekeeper and invalidator pools. - { - let cache = SegmentedCache::builder(NUM_SEGMENTS) - .thread_pool_enabled(true) - .support_invalidation_closures() - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper, Invalidator]); - } - - // Queued delivery mode: Enable the housekeeper and removal notifier pools. - { - let listener = |_k, _v, _cause| {}; - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Queued) - .build(); - let cache = SegmentedCache::builder(NUM_SEGMENTS) - .thread_pool_enabled(true) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper, RemovalNotifier]); - } - - // Immediate delivery mode: Enable only the housekeeper pool. 
- { - let listener = |_k, _v, _cause| {}; - let listener_conf = notification::Configuration::builder() - .delivery_mode(DeliveryMode::Immediate) - .build(); - let cache = SegmentedCache::builder(NUM_SEGMENTS) - .thread_pool_enabled(true) - .eviction_listener_with_conf(listener, listener_conf) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert_eq!(enabled_pools, &[Housekeeper]); - } - - // Disable all pools. - { - let cache = SegmentedCache::builder(NUM_SEGMENTS) - .thread_pool_enabled(false) - .build(); - cache.insert('a', "a"); - let enabled_pools = ThreadPoolRegistry::enabled_pools(); - assert!(enabled_pools.is_empty()); - } - } - #[test] fn test_debug_format() { let cache = SegmentedCache::new(10, 4); @@ -2059,7 +1923,6 @@ mod tests { cache: &SegmentedCache, actual: Arc>>>, expected: &[NotificationTriple], - delivery_mode: DeliveryMode, ) where K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, @@ -2076,24 +1939,15 @@ mod tests { if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; - cache.sync(); + cache.run_pending_tasks(); continue; } else { - assert_eq!( - actual.len(), - expected.len(), - "Retries exhausted (delivery mode: {:?})", - delivery_mode - ); + assert_eq!(actual.len(), expected.len(), "Retries exhausted",); } } for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { - assert_eq!( - actual, expected, - "expected[{}] (delivery mode: {:?})", - i, delivery_mode - ); + assert_eq!(actual, expected, "expected[{}]", i); } break; @@ -2104,7 +1958,6 @@ mod tests { cache: &SegmentedCache, actual: Arc, NotificationPair>>>, expected: &std::collections::HashMap, NotificationPair>, - delivery_mode: DeliveryMode, ) where K: std::hash::Hash + Eq + std::fmt::Display + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, @@ -2121,15 +1974,10 @@ mod tests { if actual.len() 
!= expected.len() { if retries <= MAX_RETRIES { retries += 1; - cache.sync(); + cache.run_pending_tasks(); continue; } else { - assert_eq!( - actual.len(), - expected.len(), - "Retries exhausted (delivery mode: {:?})", - delivery_mode - ); + assert_eq!(actual.len(), expected.len(), "Retries exhausted",); } } @@ -2137,9 +1985,8 @@ mod tests { assert_eq!( actual.get(actual_key), expected.get(actual_key), - "expected[{}] (delivery mode: {:?})", + "expected[{}]", actual_key, - delivery_mode ); } diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index ffd86610..5f39c43b 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -1,5 +1,5 @@ use super::{ - invalidator::{GetOrRemoveEntry, InvalidationResult, Invalidator, KeyDateLite, PredicateFun}, + invalidator::{GetOrRemoveEntry, Invalidator, KeyDateLite, PredicateFun}, iter::ScanningGet, key_lock::{KeyLock, KeyLockMap}, PredicateId, @@ -11,12 +11,11 @@ use crate::{ concurrent::{ atomic_time::AtomicInstant, constants::{ - READ_LOG_FLUSH_POINT, READ_LOG_SIZE, WRITE_LOG_FLUSH_POINT, - WRITE_LOG_LOW_WATER_MARK, WRITE_LOG_SIZE, + READ_LOG_FLUSH_POINT, READ_LOG_SIZE, WRITE_LOG_FLUSH_POINT, WRITE_LOG_SIZE, }, deques::Deques, entry_info::EntryInfo, - housekeeper::{self, Housekeeper, InnerSync, SyncPace}, + housekeeper::{Housekeeper, InnerSync}, AccessTime, KeyHash, KeyHashDate, KvEntry, OldEntryInfo, ReadOp, ValueEntry, Weigher, WriteOp, }, @@ -26,11 +25,7 @@ use crate::{ timer_wheel::{ReschedulingResult, TimerWheel}, CacheRegion, }, - notification::{ - self, - notifier::{RemovalNotifier, RemovedEntry}, - EvictionListener, RemovalCause, - }, + notification::{notifier::RemovalNotifier, EvictionListener, RemovalCause}, policy::ExpirationPolicy, Entry, Expiry, Policy, PredicateError, }; @@ -52,13 +47,13 @@ use std::{ }; use triomphe::Arc as TrioArc; -pub(crate) type HouseKeeperArc = Arc>>; +pub(crate) type HouseKeeperArc = Arc; pub(crate) struct BaseCache { pub(crate) inner: Arc>, 
read_op_ch: Sender>, pub(crate) write_op_ch: Sender>, - pub(crate) housekeeper: Option>, + pub(crate) housekeeper: Option, } impl Clone for BaseCache { @@ -109,11 +104,6 @@ impl BaseCache { self.inner.is_removal_notifier_enabled() } - #[inline] - pub(crate) fn is_blocking_removal_notification(&self) -> bool { - self.inner.is_blocking_removal_notification() - } - #[inline] pub(crate) fn current_time_from_expiration_clock(&self) -> Instant { self.inner.current_time_from_expiration_clock() @@ -153,10 +143,8 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - housekeeper_conf: housekeeper::Configuration, ) -> Self { let (r_size, w_size) = if max_capacity == Some(0) { (0, 0) @@ -174,21 +162,17 @@ where build_hasher, weigher, eviction_listener, - eviction_listener_conf, r_rcv, w_rcv, expiration_policy, invalidator_enabled, )); - if invalidator_enabled { - inner.set_invalidator(&inner); - } - let housekeeper = Housekeeper::new(Arc::downgrade(&inner), housekeeper_conf); + Self { inner, read_op_ch: r_snd, write_op_ch: w_snd, - housekeeper: Some(Arc::new(housekeeper)), + housekeeper: Some(Arc::new(Housekeeper::default())), } } @@ -401,13 +385,13 @@ where inner: &impl InnerSync, ch: &Sender>, now: Instant, - housekeeper: Option<&HouseKeeperArc>, + housekeeper: Option<&HouseKeeperArc>, ) { let w_len = ch.len(); if let Some(hk) = housekeeper { if Self::should_apply_writes(hk, w_len, now) { - hk.try_sync(inner); + hk.try_run_pending_tasks(inner); } } } @@ -617,18 +601,18 @@ where if let Some(hk) = &self.housekeeper { if Self::should_apply_reads(hk, len, now) { - hk.try_sync(inner); + hk.try_run_pending_tasks(inner); } } } #[inline] - fn should_apply_reads(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { + fn should_apply_reads(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_reads(ch_len, now) } #[inline] - fn 
should_apply_writes(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { + fn should_apply_writes(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_writes(ch_len, now) } } @@ -737,16 +721,20 @@ where } pub(crate) fn reconfigure_for_testing(&mut self) { - // Stop the housekeeping job that may cause sync() method to return earlier. - if let Some(housekeeper) = &self.housekeeper { - housekeeper.stop_periodical_sync_job(); - } // Enable the frequency sketch. self.inner.enable_frequency_sketch_for_testing(); + // Disable auto clean up of pending tasks. + if let Some(hk) = &self.housekeeper { + hk.disable_auto_run(); + } } pub(crate) fn set_expiration_clock(&self, clock: Option) { self.inner.set_expiration_clock(clock); + if let Some(hk) = &self.housekeeper { + let now = self.current_time_from_expiration_clock(); + hk.reset_run_after(now); + } } pub(crate) fn key_locks_map_is_empty(&self) -> bool { @@ -757,7 +745,6 @@ where struct EvictionState<'a, K, V> { counters: EvictionCounters, notifier: Option<&'a RemovalNotifier>, - removed_entries: Option>>, } impl<'a, K, V> EvictionState<'a, K, V> { @@ -766,18 +753,9 @@ impl<'a, K, V> EvictionState<'a, K, V> { weighted_size: u64, notifier: Option<&'a RemovalNotifier>, ) -> Self { - let removed_entries = notifier.and_then(|n| { - if n.is_batching_supported() { - Some(Vec::new()) - } else { - None - } - }); - Self { counters: EvictionCounters::new(entry_count, weighted_size), notifier, - removed_entries, } } @@ -796,23 +774,10 @@ impl<'a, K, V> EvictionState<'a, K, V> { { debug_assert!(self.is_notifier_enabled()); - if let Some(removed) = &mut self.removed_entries { - removed.push(RemovedEntry::new(key, entry.value.clone(), cause)); - } else if let Some(notifier) = self.notifier { + if let Some(notifier) = self.notifier { notifier.notify(key, entry.value.clone(), cause); } } - - fn notify_multiple_removals(&mut self) - where - K: Send + Sync + 'static, - V: Send + Sync + 'static, - { - if let 
(Some(notifier), Some(removed)) = (self.notifier, self.removed_entries.take()) { - notifier.batch_notify(removed); - notifier.sync(); - } - } } struct EvictionCounters { @@ -935,7 +900,7 @@ pub(crate) struct Inner { removal_notifier: Option>, key_locks: Option>, invalidator_enabled: bool, - invalidator: RwLock>>, + invalidator: Option>, clocks: Clocks, } @@ -968,14 +933,6 @@ impl Inner { self.removal_notifier.is_some() } - #[inline] - fn is_blocking_removal_notification(&self) -> bool { - self.removal_notifier - .as_ref() - .map(|rn| rn.is_blocking()) - .unwrap_or_default() - } - fn maybe_key_lock(&self, key: &Arc) -> Option> where K: Hash + Eq, @@ -1061,7 +1018,6 @@ where build_hasher: S, weigher: Option>, eviction_listener: Option>, - eviction_listener_conf: Option, read_op_ch: Receiver>, write_op_ch: Receiver>, expiration_policy: ExpirationPolicy, @@ -1089,21 +1045,19 @@ where let timer_wheel = Mutex::new(TimerWheel::new(now)); let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { - let rn = RemovalNotifier::new( - listener, - eviction_listener_conf.unwrap_or_default(), - name.clone(), - ); - if rn.is_blocking() { - let kl = KeyLockMap::with_hasher(build_hasher.clone()); - (Some(rn), Some(kl)) - } else { - (Some(rn), None) - } + let rn = RemovalNotifier::new(listener, name.clone()); + let kl = KeyLockMap::with_hasher(build_hasher.clone()); + (Some(rn), Some(kl)) } else { (None, None) }; + let invalidator = if invalidator_enabled { + Some(Invalidator::new(build_hasher.clone())) + } else { + None + }; + Self { name, max_capacity, @@ -1123,16 +1077,11 @@ where removal_notifier, key_locks, invalidator_enabled, - // When enabled, this field will be set later via the set_invalidator method. 
- invalidator: Default::default(), + invalidator, clocks, } } - fn set_invalidator(&self, self_ref: &Arc) { - *self.invalidator.write() = Some(Invalidator::new(Arc::downgrade(&Arc::clone(self_ref)))); - } - #[inline] fn hash(&self, key: &Q) -> u64 where @@ -1192,7 +1141,7 @@ where predicate: PredicateFun, registered_at: Instant, ) -> Result { - if let Some(inv) = &*self.invalidator.read() { + if let Some(inv) = &self.invalidator { inv.register_predicate(predicate, registered_at) } else { Err(PredicateError::InvalidationClosuresDisabled) @@ -1201,9 +1150,12 @@ where /// Returns `true` if the entry is invalidated by `invalidate_entries_if` method. #[inline] - fn is_invalidated_entry(&self, key: &Arc, entry: &TrioArc>) -> bool { + fn is_invalidated_entry(&self, key: &Arc, entry: &TrioArc>) -> bool + where + V: Clone, + { if self.invalidator_enabled { - if let Some(inv) = &*self.invalidator.read() { + if let Some(inv) = &self.invalidator { return inv.apply_predicates(key, entry); } } @@ -1216,7 +1168,7 @@ where } } -impl GetOrRemoveEntry for Arc> +impl GetOrRemoveEntry for Inner where K: Hash + Eq, S: BuildHasher, @@ -1273,9 +1225,24 @@ where V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { - fn sync(&self, max_repeats: usize) -> Option { + fn run_pending_tasks(&self, max_repeats: usize) { + self.do_run_pending_tasks(max_repeats); + } + + fn now(&self) -> Instant { + self.current_time_from_expiration_clock() + } +} + +impl Inner +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + fn do_run_pending_tasks(&self, max_repeats: usize) { if self.max_capacity == Some(0) { - return None; + return; } let mut deqs = self.deques.lock(); @@ -1325,17 +1292,15 @@ where ); } - if self.invalidator_enabled { - if let Some(invalidator) = &*self.invalidator.read() { - if !invalidator.is_empty() && !invalidator.is_task_running() { - self.invalidate_entries( - 
invalidator, - &mut deqs, - &mut timer_wheel, - batch_size::INVALIDATION_BATCH_SIZE, - &mut eviction_state, - ); - } + if let Some(invalidator) = &self.invalidator { + if !invalidator.is_empty() { + self.invalidate_entries( + invalidator, + &mut deqs, + &mut timer_wheel, + batch_size::INVALIDATION_BATCH_SIZE, + &mut eviction_state, + ); } } @@ -1351,8 +1316,6 @@ where ); } - eviction_state.notify_multiple_removals(); - debug_assert_eq!(self.entry_count.load(), current_ec); debug_assert_eq!(self.weighted_size.load(), current_ws); self.entry_count.store(eviction_state.counters.entry_count); @@ -1360,19 +1323,6 @@ where .store(eviction_state.counters.weighted_size); crossbeam_epoch::pin().flush(); - - if should_process_logs { - Some(SyncPace::Fast) - } else if self.write_op_ch.len() <= WRITE_LOG_LOW_WATER_MARK { - Some(SyncPace::Normal) - } else { - // Keep the current pace. - None - } - } - - fn now(&self) -> Instant { - self.current_time_from_expiration_clock() } } @@ -2097,73 +2047,53 @@ where eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, - { - self.process_invalidation_result(invalidator, deqs, timer_wheel, eviction_state); - self.submit_invalidation_task(invalidator, &mut deqs.write_order, batch_size); - } - - fn process_invalidation_result( - &self, - invalidator: &Invalidator, - deqs: &mut Deques, - timer_wheel: &mut TimerWheel, - eviction_state: &mut EvictionState<'_, K, V>, - ) where - V: Clone, - { - if let Some(InvalidationResult { - invalidated, - is_done, - }) = invalidator.task_result() - { - for KvEntry { key: _key, entry } in invalidated { - Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters); - } - if is_done { - deqs.write_order.reset_cursor(); - } - } - } - - fn submit_invalidation_task( - &self, - invalidator: &Invalidator, - write_order: &mut Deque>, - batch_size: usize, - ) where - V: Clone, { let now = self.current_time_from_expiration_clock(); // If the write order queue is empty, we are done and can 
remove the predicates // that have been registered by now. - if write_order.len() == 0 { + if deqs.write_order.len() == 0 { invalidator.remove_predicates_registered_before(now); return; } let mut candidates = Vec::with_capacity(batch_size); - let mut iter = write_order.peekable(); let mut len = 0; - - while len < batch_size { - if let Some(kd) = iter.next() { - if !kd.is_dirty() { - if let Some(ts) = kd.last_modified() { - let key = kd.key(); - let hash = self.hash(key); - candidates.push(KeyDateLite::new(key, hash, ts)); - len += 1; + let has_next; + { + let iter = &mut deqs.write_order.peekable(); + + while len < batch_size { + if let Some(kd) = iter.next() { + if !kd.is_dirty() { + if let Some(ts) = kd.last_modified() { + let key = kd.key(); + let hash = self.hash(key); + candidates.push(KeyDateLite::new(key, hash, ts)); + len += 1; + } } + } else { + break; } - } else { - break; } + + has_next = iter.peek().is_some(); } - if len > 0 { - let is_truncated = len == batch_size && iter.peek().is_some(); - invalidator.submit_task(candidates, is_truncated); + if len == 0 { + return; + } + + let is_truncated = len == batch_size && has_next; + let (invalidated, is_done) = + invalidator.scan_and_invalidate(self, candidates, is_truncated); + + for KvEntry { key: _key, entry } in invalidated { + Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters); + } + if is_done { + deqs.write_order.reset_cursor(); } } @@ -2328,7 +2258,6 @@ where { fn invalidation_predicate_count(&self) -> usize { self.invalidator - .read() .as_ref() .map(|inv| inv.predicate_count()) .unwrap_or(0) @@ -2487,7 +2416,7 @@ fn is_expired_by_ttl( #[cfg(test)] mod tests { - use crate::{common::concurrent::housekeeper, policy::ExpirationPolicy}; + use crate::policy::ExpirationPolicy; use super::BaseCache; @@ -2507,10 +2436,8 @@ mod tests { RandomState::default(), None, None, - None, Default::default(), false, - housekeeper::Configuration::new_thread_pool(true), ); 
cache.inner.enable_frequency_sketch_for_testing(); assert_eq!( @@ -2595,19 +2522,19 @@ mod tests { ($cache:ident, $key:ident, $hash:ident, $mock:ident, $duration_secs:expr) => { // Increment the time. $mock.increment(Duration::from_millis($duration_secs * 1000 - 1)); - $cache.inner.sync(1); + $cache.inner.run_pending_tasks(1); assert!($cache.contains_key_with_hash(&$key, $hash)); assert_eq!($cache.entry_count(), 1); // Increment the time by 1ms (3). The entry should be expired. $mock.increment(Duration::from_millis(1)); - $cache.inner.sync(1); + $cache.inner.run_pending_tasks(1); assert!(!$cache.contains_key_with_hash(&$key, $hash)); // Increment the time again to ensure the entry has been evicted from the // cache. $mock.increment(Duration::from_secs(1)); - $cache.inner.sync(1); + $cache.inner.run_pending_tasks(1); assert_eq!($cache.entry_count(), 0); }; } @@ -2854,14 +2781,12 @@ mod tests { RandomState::default(), None, None, - None, ExpirationPolicy::new( Some(Duration::from_secs(TTL)), Some(Duration::from_secs(TTI)), expiry, ), false, - housekeeper::Configuration::new_blocking(), ); cache.reconfigure_for_testing(); @@ -2891,7 +2816,7 @@ mod tests { insert(&cache, key, hash, value); // Run a sync to register the entry to the internal data structures including // the timer wheel. - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 1); @@ -2913,12 +2838,12 @@ mod tests { ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(1)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); // Read the entry (2). 
@@ -2937,7 +2862,7 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_expiry!(cache, key, hash, mock, 3); @@ -2959,12 +2884,12 @@ mod tests { ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(1)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); // Read the entry (2). @@ -2983,11 +2908,11 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); // Increment the time. mock.increment(Duration::from_secs(2)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3002,7 +2927,7 @@ mod tests { Some(3), ); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 3); @@ -3025,12 +2950,12 @@ mod tests { ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(1)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3050,11 +2975,11 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); // Increment the time. 
mock.increment(Duration::from_secs(2)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3069,7 +2994,7 @@ mod tests { None, ); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 7); @@ -3091,12 +3016,12 @@ mod tests { ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); let inserted_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(5)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3116,7 +3041,7 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_expiry!(cache, key, hash, mock, 7); @@ -3138,12 +3063,12 @@ mod tests { ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); let inserted_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(5)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3163,11 +3088,11 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); // Increment the time. 
mock.increment(Duration::from_secs(6)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3187,7 +3112,7 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_expiry!(cache, key, hash, mock, 5); @@ -3208,12 +3133,12 @@ mod tests { *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(9)); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(6)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3229,12 +3154,12 @@ mod tests { ); let updated_at = current_time(&cache); insert(&cache, key, hash, value); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(6)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3254,11 +3179,11 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); // Increment the time. 
mock.increment(Duration::from_secs(6)); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); @@ -3278,7 +3203,7 @@ mod tests { .map(Entry::into_value), Some(value) ); - cache.inner.sync(1); + cache.inner.run_pending_tasks(1); assert_expiry!(cache, key, hash, mock, 4); } diff --git a/src/sync_base/invalidator.rs b/src/sync_base/invalidator.rs index 4ff0435d..2ad17075 100644 --- a/src/sync_base/invalidator.rs +++ b/src/sync_base/invalidator.rs @@ -1,32 +1,27 @@ -use super::{base_cache::Inner, PredicateId, PredicateIdStr}; +use super::{PredicateId, PredicateIdStr}; use crate::{ common::{ - concurrent::{ - thread_pool::{PoolName, ThreadPool, ThreadPoolRegistry}, - unsafe_weak_pointer::UnsafeWeakPointer, - AccessTime, KvEntry, ValueEntry, - }, + concurrent::{AccessTime, KvEntry, ValueEntry}, time::Instant, }, PredicateError, }; -use parking_lot::{Mutex, RwLock}; +use parking_lot::{Mutex, MutexGuard}; use std::{ - collections::HashMap, hash::{BuildHasher, Hash}, - marker::PhantomData, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Weak, + Arc, }, - time::Duration, }; use triomphe::Arc as TrioArc; use uuid::Uuid; pub(crate) type PredicateFun = Arc bool + Send + Sync + 'static>; +const PREDICATE_MAP_NUM_SEGMENTS: usize = 16; + pub(crate) trait GetOrRemoveEntry { fn get_value_entry(&self, key: &Arc, hash: u64) -> Option>>; @@ -67,53 +62,57 @@ impl KeyDateLite { } } -pub(crate) struct InvalidationResult { - pub(crate) invalidated: Vec>, - pub(crate) is_done: bool, -} +// pub(crate) struct InvalidationResult { +// pub(crate) invalidated: Vec>, +// pub(crate) is_done: bool, +// } -impl InvalidationResult { - fn new(invalidated: Vec>, is_done: bool) -> Self { - Self { - invalidated, - is_done, - } - } -} +// impl InvalidationResult { +// fn new(invalidated: Vec>, is_done: bool) -> Self { +// Self { +// invalidated, +// is_done, +// } +// } +// } pub(crate) struct Invalidator { - 
predicates: RwLock>>, + predicates: crate::cht::SegmentedHashMap, S>, is_empty: AtomicBool, - scan_context: Arc>, - thread_pool: Arc, + scan_context: Arc>, } -impl Drop for Invalidator { - fn drop(&mut self) { - let ctx = &self.scan_context; - // Disallow to create and run a scanning task by now. - ctx.is_shutting_down.store(true, Ordering::Release); +// impl Drop for Invalidator { +// fn drop(&mut self) { +// let ctx = &self.scan_context; +// // Disallow to create and run a scanning task by now. +// ctx.is_shutting_down.store(true, Ordering::Release); - // Wait for the scanning task to finish. (busy loop) - while ctx.is_running.load(Ordering::Acquire) { - std::thread::sleep(Duration::from_millis(1)); - } - - ThreadPoolRegistry::release_pool(&self.thread_pool); - } -} +// // Wait for the scanning task to finish. (busy loop) +// while ctx.is_running.load(Ordering::Acquire) { +// std::thread::sleep(Duration::from_millis(1)); +// } +// } +// } // // Crate public methods. // impl Invalidator { - pub(crate) fn new(cache: Weak>) -> Self { - let thread_pool = ThreadPoolRegistry::acquire_pool(PoolName::Invalidator); + pub(crate) fn new(hasher: S) -> Self + where + S: BuildHasher, + { + const CAPACITY: usize = 0; + let predicates = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( + PREDICATE_MAP_NUM_SEGMENTS, + CAPACITY, + hasher, + ); Self { - predicates: RwLock::new(HashMap::new()), + predicates, is_empty: AtomicBool::new(true), - scan_context: Arc::new(ScanContext::new(cache)), - thread_pool, + scan_context: Arc::new(ScanContext::default()), } } @@ -121,18 +120,23 @@ impl Invalidator { self.is_empty.load(Ordering::Acquire) } - pub(crate) fn remove_predicates_registered_before(&self, ts: Instant) { - let mut pred_map = self.predicates.write(); + pub(crate) fn remove_predicates_registered_before(&self, ts: Instant) + where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher, + { + let pred_map = &self.predicates; 
let removing_ids = pred_map .iter() .filter(|(_, pred)| pred.registered_at <= ts) .map(|(id, _)| id) - .cloned() .collect::>(); for id in removing_ids { - pred_map.remove(&id); + let hash = pred_map.hash(&id); + pred_map.remove(hash, |k| k == &id); } if pred_map.is_empty() { @@ -144,20 +148,26 @@ impl Invalidator { &self, predicate: PredicateFun, registered_at: Instant, - ) -> Result { + ) -> Result + where + K: Hash + Eq, + S: BuildHasher, + { const MAX_RETRY: usize = 1_000; let mut tries = 0; - let mut preds = self.predicates.write(); + let preds = &self.predicates; while tries < MAX_RETRY { let id = Uuid::new_v4().as_hyphenated().to_string(); - if preds.contains_key(&id) { + + let hash = preds.hash(&id); + if preds.contains_key(hash, |k| k == &id) { tries += 1; continue; // Retry } let pred = Predicate::new(&id, predicate, registered_at); - preds.insert(id.clone(), pred); + preds.insert_entry_and(id.clone(), hash, pred, |_, _| ()); self.is_empty.store(false, Ordering::Release); return Ok(id); @@ -171,63 +181,106 @@ impl Invalidator { // This method will be called by the get method of Cache. 
#[inline] - pub(crate) fn apply_predicates(&self, key: &Arc, entry: &TrioArc>) -> bool { + pub(crate) fn apply_predicates(&self, key: &Arc, entry: &TrioArc>) -> bool + where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher, + { if self.is_empty() { false } else if let Some(ts) = entry.last_modified() { - Self::do_apply_predicates(self.predicates.read().values(), key, &entry.value, ts) + Self::do_apply_predicates( + self.predicates.iter().map(|(_, v)| v), + key, + &entry.value, + ts, + ) } else { false } } - pub(crate) fn is_task_running(&self) -> bool { - self.scan_context.is_running.load(Ordering::Acquire) - } + // pub(crate) fn is_task_running(&self) -> bool { + // self.scan_context.is_running.load(Ordering::Acquire) + // } - pub(crate) fn submit_task(&self, candidates: Vec>, is_truncated: bool) + pub(crate) fn scan_and_invalidate( + &self, + cache: &C, + candidates: Vec>, + is_truncated: bool, + ) -> (Vec>, bool) where + C: GetOrRemoveEntry, K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, - S: BuildHasher + Send + Sync + 'static, + S: BuildHasher, { - let ctx = &self.scan_context; - - // Do not submit a task if this invalidator is about to be dropped. - if ctx.is_shutting_down.load(Ordering::Acquire) { - return; + // let ctx = &self.scan_context; + + // // Do not submit a task if this invalidator is about to be dropped. + // if ctx.is_shutting_down.load(Ordering::Acquire) { + // return; + // } + + // // Ensure there is no pending task and result. + // assert!(!self.is_task_running()); + // assert!(ctx.result.lock().is_none()); + + // // Populate ctx.predicates if it is empty. 
+ // { + // let mut ps = ctx.predicates.lock(); + // if ps.is_empty() { + // *ps = self.predicates.read().values().cloned().collect(); + // } + // } + + // self.scan_context.is_running.store(true, Ordering::Release); + + // let task = ScanTask::new(&self.scan_context, candidates, is_truncated); + // self.thread_pool.pool.execute(move || { + // task.execute(); + // }); + + let mut predicates = self.scan_context.predicates.lock(); + if predicates.is_empty() { + *predicates = self.predicates.iter().map(|(_k, v)| v).collect(); } - // Ensure there is no pending task and result. - assert!(!self.is_task_running()); - assert!(ctx.result.lock().is_none()); + let mut invalidated = Vec::default(); + let mut newest_timestamp = None; - // Populate ctx.predicates if it is empty. - { - let mut ps = ctx.predicates.lock(); - if ps.is_empty() { - *ps = self.predicates.read().values().cloned().collect(); + for candidate in &candidates { + let key = &candidate.key; + let hash = candidate.hash; + let ts = candidate.timestamp; + if self.apply(&predicates, cache, key, hash, ts) { + if let Some(entry) = Self::invalidate(cache, key, hash, ts) { + invalidated.push(KvEntry { + key: Arc::clone(key), + entry, + }) + } } + newest_timestamp = Some(ts); } - self.scan_context.is_running.store(true, Ordering::Release); + self.remove_finished_predicates(predicates, is_truncated, newest_timestamp); - let task = ScanTask::new(&self.scan_context, candidates, is_truncated); - self.thread_pool.pool.execute(move || { - task.execute(); - }); + (invalidated, self.predicates.is_empty()) } - pub(crate) fn task_result(&self) -> Option> { - assert!(!self.is_task_running()); - let ctx = &self.scan_context; + // pub(crate) fn task_result(&self) -> Option> { + // assert!(!self.is_task_running()); + // let ctx = &self.scan_context; - ctx.result.lock().take().map(|result| { - self.remove_finished_predicates(ctx, &result); - let is_done = ctx.predicates.lock().is_empty(); - InvalidationResult::new(result.invalidated, 
is_done) - }) - } + // ctx.result.lock().take().map(|result| { + // self.remove_finished_predicates(ctx, &result); + // let is_done = ctx.predicates.lock().is_empty(); + // InvalidationResult::new(result.invalidated, is_done) + // }) + // } } // @@ -235,9 +288,9 @@ impl Invalidator { // impl Invalidator { #[inline] - fn do_apply_predicates<'a, I>(predicates: I, key: &'a K, value: &'a V, ts: Instant) -> bool + fn do_apply_predicates(predicates: I, key: &K, value: &V, ts: Instant) -> bool where - I: Iterator>, + I: Iterator>, { for predicate in predicates { if predicate.is_applicable(ts) && predicate.apply(key, value) { @@ -247,11 +300,18 @@ impl Invalidator { false } - fn remove_finished_predicates(&self, ctx: &ScanContext, result: &ScanResult) { - let mut predicates = ctx.predicates.lock(); - - if result.is_truncated { - if let Some(ts) = result.newest_timestamp { + fn remove_finished_predicates( + &self, + mut predicates: MutexGuard<'_, Vec>>, + is_truncated: bool, + newest_timestamp: Option, + ) where + K: Hash + Eq, + S: BuildHasher, + { + let predicates = &mut *predicates; + if is_truncated { + if let Some(ts) = newest_timestamp { let (active, finished): (Vec<_>, Vec<_>) = predicates.drain(..).partition(|p| p.is_applicable(ts)); @@ -259,23 +319,78 @@ impl Invalidator { self.remove_predicates(&finished); // Set the active predicates to the scan context. *predicates = active; + } else { + unreachable!(); } } else { // Remove all the predicates from the predicate registry and scan context. 
- self.remove_predicates(&predicates); + self.remove_predicates(predicates); predicates.clear(); } } - fn remove_predicates(&self, predicates: &[Predicate]) { - let mut pred_map = self.predicates.write(); + fn remove_predicates(&self, predicates: &[Predicate]) + where + K: Hash + Eq, + S: BuildHasher, + { + let pred_map = &self.predicates; predicates.iter().for_each(|p| { - pred_map.remove(p.id()); + let hash = pred_map.hash(p.id()); + pred_map.remove(hash, |k| k == p.id()); }); + if pred_map.is_empty() { self.is_empty.store(true, Ordering::Release); } } + + fn apply( + &self, + predicates: &[Predicate], + cache: &C, + key: &Arc, + hash: u64, + ts: Instant, + ) -> bool + where + C: GetOrRemoveEntry, + { + if let Some(entry) = cache.get_value_entry(key, hash) { + if let Some(lm) = entry.last_modified() { + if lm == ts { + return Invalidator::<_, _, S>::do_apply_predicates( + predicates.iter().cloned(), + key, + &entry.value, + lm, + ); + } + } + } + + false + } + + fn invalidate( + cache: &C, + key: &Arc, + hash: u64, + ts: Instant, + ) -> Option>> + where + C: GetOrRemoveEntry, + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + cache.remove_key_value_if(key, hash, |_, v| { + if let Some(lm) = v.last_modified() { + lm == ts + } else { + false + } + }) + } } // @@ -284,28 +399,18 @@ impl Invalidator { #[cfg(test)] impl Invalidator { pub(crate) fn predicate_count(&self) -> usize { - self.predicates.read().len() + self.predicates.len() } } -struct ScanContext { +struct ScanContext { predicates: Mutex>>, - cache: Mutex>>, - result: Mutex>>, - is_running: AtomicBool, - is_shutting_down: AtomicBool, - _marker: PhantomData, } -impl ScanContext { - fn new(cache: Weak>) -> Self { +impl Default for ScanContext { + fn default() -> Self { Self { predicates: Mutex::new(Vec::default()), - cache: Mutex::new(UnsafeWeakPointer::from_weak_arc(cache)), - result: Mutex::new(None), - is_running: AtomicBool::new(false), - is_shutting_down: AtomicBool::new(false), - 
_marker: PhantomData, } } } @@ -347,149 +452,3 @@ impl Predicate { (self.f)(key, value) } } - -struct ScanTask { - scan_context: Arc>, - candidates: Vec>, - is_truncated: bool, -} - -impl ScanTask -where - K: Hash + Eq, - S: BuildHasher, -{ - fn new( - scan_context: &Arc>, - candidates: Vec>, - is_truncated: bool, - ) -> Self { - Self { - scan_context: Arc::clone(scan_context), - candidates, - is_truncated, - } - } - - fn execute(&self) - where - K: Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - { - let cache_lock = self.scan_context.cache.lock(); - - // Restore the Weak pointer to Inner. - let weak = unsafe { cache_lock.as_weak_arc() }; - if let Some(inner_cache) = weak.upgrade() { - // TODO: Protect this call with catch_unwind(). - *self.scan_context.result.lock() = Some(self.do_execute(&inner_cache)); - - // Change this flag here (before downgrading the Arc to a Weak) to avoid a (soft) - // deadlock. (forget_arc might trigger to drop the cache, which is in turn to drop - // this invalidator. To do it, this flag must be false, otherwise dropping self - // will be blocked forever) - self.scan_context.is_running.store(false, Ordering::Release); - // Avoid to drop the Arc>. - UnsafeWeakPointer::forget_arc(inner_cache); - } else { - *self.scan_context.result.lock() = Some(ScanResult::default()); - self.scan_context.is_running.store(false, Ordering::Release); - // Avoid to drop the Weak>. 
- UnsafeWeakPointer::forget_weak_arc(weak); - } - } - - fn do_execute(&self, cache: &Arc) -> ScanResult - where - Arc: GetOrRemoveEntry, - K: Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - { - let predicates = self.scan_context.predicates.lock(); - let mut invalidated = Vec::default(); - let mut newest_timestamp = None; - - for candidate in &self.candidates { - let key = &candidate.key; - let hash = candidate.hash; - let ts = candidate.timestamp; - if Self::apply(&predicates, cache, key, hash, ts) { - if let Some(entry) = Self::invalidate(cache, key, hash, ts) { - invalidated.push(KvEntry { - key: Arc::clone(key), - entry, - }) - } - } - newest_timestamp = Some(ts); - } - - ScanResult { - invalidated, - is_truncated: self.is_truncated, - newest_timestamp, - } - } - - fn apply( - predicates: &[Predicate], - cache: &Arc, - key: &Arc, - hash: u64, - ts: Instant, - ) -> bool - where - Arc: GetOrRemoveEntry, - { - if let Some(entry) = cache.get_value_entry(key, hash) { - if let Some(lm) = entry.last_modified() { - if lm == ts { - return Invalidator::<_, _, S>::do_apply_predicates( - predicates.iter(), - key, - &entry.value, - lm, - ); - } - } - } - - false - } - - fn invalidate( - cache: &Arc, - key: &Arc, - hash: u64, - ts: Instant, - ) -> Option>> - where - Arc: GetOrRemoveEntry, - K: Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - { - cache.remove_key_value_if(key, hash, |_, v| { - if let Some(lm) = v.last_modified() { - lm == ts - } else { - false - } - }) - } -} - -struct ScanResult { - invalidated: Vec>, - is_truncated: bool, - newest_timestamp: Option, -} - -impl Default for ScanResult { - fn default() -> Self { - Self { - invalidated: Vec::default(), - is_truncated: false, - newest_timestamp: None, - } - } -}