From ec3ed289448fd43cbfb37004e36aa345e7154df7 Mon Sep 17 00:00:00 2001
From: "Peter C. S. Scholtens"
Date: Fri, 19 May 2023 10:03:57 +0200
Subject: [PATCH] Add an examples directory to allow cargo to run the given
 examples

The sync and async examples that were already documented in the README are
added, together with a size-aware-eviction and an eviction-listener example
that demonstrate the subtleties of the cache behavior. For CI purposes, the
required-features of these examples are defined in Cargo.toml.

Update README.md.
---
 Cargo.toml                      | 16 ++++++++++
 README.md                       | 19 ++++++++++++
 examples/async_example.rs       | 54 +++++++++++++++++++++++++++++++++
 examples/eviction_listener.rs   | 48 +++++++++++++++++++++++++++++
 examples/size_aware_eviction.rs | 16 ++++++++++
 examples/sync_example.rs        | 53 ++++++++++++++++++++++++++++++++
 6 files changed, 206 insertions(+)
 create mode 100644 examples/async_example.rs
 create mode 100644 examples/eviction_listener.rs
 create mode 100644 examples/size_aware_eviction.rs
 create mode 100644 examples/sync_example.rs

diff --git a/Cargo.toml b/Cargo.toml
index ce32046b..9a9cbf9c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,3 +120,19 @@ rustc_version = "0.4.0"
 # Build the doc with some features enabled.
 features = ["future"]
 rustdoc-args = ["--cfg", "docsrs"]
+
+[[example]]
+name = "async_example"
+required-features = ["future"]
+
+[[example]]
+name = "sync_example"
+required-features = ["sync"]
+
+[[example]]
+name = "eviction_listener"
+required-features = ["sync"]
+
+[[example]]
+name = "size_aware_eviction"
+required-features = ["sync"]
diff --git a/README.md b/README.md
index 0bd9fcda..c7797754 100644
--- a/README.md
+++ b/README.md
@@ -230,6 +230,13 @@ fn main() {
 }
 ```
 
+You can try the synchronous example by cloning the repository and running the following
+cargo command:
+
+```
+cargo run --example sync_example
+```
+
 If you want to atomically initialize and insert a value when the key is not
 present, you might want to check [the document][doc-sync-cache] for other
 insertion methods `get_with` and `try_get_with`.
@@ -323,6 +330,12 @@ async fn main() {
 }
 ```
 
+You can try the asynchronous example by cloning the repository and running the following
+cargo command:
+```
+cargo run --features="future" --example async_example
+```
+
 If you want to atomically initialize and insert a value when the key is not
 present, you might want to check [the document][doc-future-cache] for other
 insertion methods `get_with` and `try_get_with`.
@@ -389,6 +402,12 @@ fn main() {
 
 Note that weighted sizes are not used when making eviction selections.
 
+You can try the size-aware eviction example by cloning the repository and running the following
+cargo command:
+```
+cargo run --example size_aware_eviction
+```
+
 
 ## Expiration Policies
 
diff --git a/examples/async_example.rs b/examples/async_example.rs
new file mode 100644
index 00000000..c8dd1baa
--- /dev/null
+++ b/examples/async_example.rs
@@ -0,0 +1,54 @@
+// Use the asynchronous cache.
+use moka::future::Cache;
+
+#[tokio::main]
+async fn main() {
+    const NUM_TASKS: usize = 16;
+    const NUM_KEYS_PER_TASK: usize = 64;
+
+    fn value(n: usize) -> String {
+        format!("value {}", n)
+    }
+
+    // Create a cache that can store up to 10,000 entries.
+    let cache = Cache::new(10_000);
+
+    // Spawn async tasks and write to and read from the cache.
+    let tasks: Vec<_> = (0..NUM_TASKS)
+        .map(|i| {
+            // To share the same cache across the async tasks, clone it.
+            // This is a cheap operation.
+            let my_cache = cache.clone();
+            let start = i * NUM_KEYS_PER_TASK;
+            let end = (i + 1) * NUM_KEYS_PER_TASK;
+
+            tokio::spawn(async move {
+                // Insert 64 entries. (NUM_KEYS_PER_TASK = 64)
+                for key in start..end {
+                    // insert() is an async method, so await it.
+                    my_cache.insert(key, value(key)).await;
+                    // get() returns Option<String>, a clone of the stored value.
+                    assert_eq!(my_cache.get(&key), Some(value(key)));
+                }
+
+                // Invalidate every 4th element of the inserted entries.
+                for key in (start..end).step_by(4) {
+                    // invalidate() is an async method, so await it.
+                    my_cache.invalidate(&key).await;
+                }
+            })
+        })
+        .collect();
+
+    // Wait for all tasks to complete.
+    futures_util::future::join_all(tasks).await;
+
+    // Verify the result.
+    for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) {
+        if key % 4 == 0 {
+            assert_eq!(cache.get(&key), None);
+        } else {
+            assert_eq!(cache.get(&key), Some(value(key)));
+        }
+    }
+}
diff --git a/examples/eviction_listener.rs b/examples/eviction_listener.rs
new file mode 100644
index 00000000..02f52b20
--- /dev/null
+++ b/examples/eviction_listener.rs
@@ -0,0 +1,48 @@
+use moka::sync::Cache;
+use moka::sync::ConcurrentCacheExt;
+use std::thread::sleep;
+use std::time::Duration;
+
+fn main() {
+    // Make an artificially small cache with a 1-second TTL to observe the eviction listener.
+    {
+        let cache = Cache::builder()
+            .max_capacity(2)
+            .time_to_live(Duration::from_secs(1))
+            .eviction_listener(|key, value, cause| {
+                println!("Evicted ({:?},{:?}) because {:?}", key, value, cause)
+            })
+            .build();
+        // Overload the capacity of the cache.
+        cache.insert(&0, "zero".to_string());
+        cache.insert(&1, "one".to_string());
+        cache.insert(&2, "twice".to_string());
+        // Due to a race condition, the entry that spills over the capacity may be
+        // evicted twice, with causes Replaced and Size.
+        cache.insert(&2, "two".to_string());
+        // With the 1-second TTL, keys 0 and 1 will be evicted if we wait long enough.
+        sleep(Duration::from_secs(2));
+        println!("Wake up!");
+        cache.insert(&3, "three".to_string());
+        cache.insert(&4, "four".to_string());
+        cache.insert(&5, "five".to_string());
+        let _ = cache.remove(&3);
+        cache.invalidate(&4);
+        cache.invalidate_all();
+        loop {
+            // Synchronization is limited to at most 500 entries for each call.
+            cache.sync();
+            // Check if all is done. Calling entry_count() requires calling sync() first!
+            if cache.entry_count() == 0 {
+                break;
+            }
+        }
+        cache.insert(&6, "six".to_string());
+        // When the cache is dropped, the eviction listener is not called. Either
+        // call invalidate_all() or wait longer than the TTL.
+        sleep(Duration::from_secs(2));
+        println!("Cache structure removed.");
+    }
+    sleep(Duration::from_secs(1));
+    println!("Exit program.");
+}
diff --git a/examples/size_aware_eviction.rs b/examples/size_aware_eviction.rs
new file mode 100644
index 00000000..d874efd6
--- /dev/null
+++ b/examples/size_aware_eviction.rs
@@ -0,0 +1,16 @@
+use std::convert::TryInto;
+use moka::sync::Cache;
+
+fn main() {
+    let cache = Cache::builder()
+        // A weigher closure takes &K and &V and returns a u32 representing the
+        // relative size of the entry. Here, we use the byte length of the value
+        // String as the size.
+        .weigher(|_key, value: &String| -> u32 {
+            value.len().try_into().unwrap_or(u32::MAX)
+        })
+        // This cache will hold up to 32 MiB of values.
+        .max_capacity(32 * 1024 * 1024)
+        .build();
+    cache.insert(0, "zero".to_string());
+}
diff --git a/examples/sync_example.rs b/examples/sync_example.rs
new file mode 100644
index 00000000..5947f506
--- /dev/null
+++ b/examples/sync_example.rs
@@ -0,0 +1,53 @@
+// Use the synchronous cache.
+use moka::sync::Cache;
+
+use std::thread;
+
+fn value(n: usize) -> String {
+    format!("value {}", n)
+}
+
+fn main() {
+    const NUM_THREADS: usize = 16;
+    const NUM_KEYS_PER_THREAD: usize = 64;
+
+    // Create a cache that can store up to 10,000 entries.
+    let cache = Cache::new(10_000);
+
+    // Spawn threads and read and update the cache simultaneously.
+    let threads: Vec<_> = (0..NUM_THREADS)
+        .map(|i| {
+            // To share the same cache across the threads, clone it.
+            // This is a cheap operation.
+            let my_cache = cache.clone();
+            let start = i * NUM_KEYS_PER_THREAD;
+            let end = (i + 1) * NUM_KEYS_PER_THREAD;
+
+            thread::spawn(move || {
+                // Insert 64 entries. (NUM_KEYS_PER_THREAD = 64)
+                for key in start..end {
+                    my_cache.insert(key, value(key));
+                    // get() returns Option<String>, a clone of the stored value.
+                    assert_eq!(my_cache.get(&key), Some(value(key)));
+                }
+
+                // Invalidate every 4th element of the inserted entries.
+                for key in (start..end).step_by(4) {
+                    my_cache.invalidate(&key);
+                }
+            })
+        })
+        .collect();
+
+    // Wait for all threads to complete.
+    threads.into_iter().for_each(|t| t.join().expect("Failed"));
+
+    // Verify the result.
+    for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) {
+        if key % 4 == 0 {
+            assert_eq!(cache.get(&key), None);
+        } else {
+            assert_eq!(cache.get(&key), Some(value(key)));
+        }
+    }
+}
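
The eviction listener example added above is not mentioned in the README changes; since its required `sync` feature is enabled by default, it can be run the same way with `cargo run --example eviction_listener`. For code that wants to react to the eviction cause rather than only print it, here is a minimal sketch (an illustration, not part of this patch; it assumes moka's `moka::notification::RemovalCause` type, with variants such as `Expired` and `Explicit`):

```rust
// Illustration only (not part of the patch): a listener that branches on the
// eviction cause instead of printing every notification the same way.
// Assumes moka::notification::RemovalCause (variants such as Expired, Explicit).
use moka::notification::RemovalCause;
use moka::sync::Cache;
use std::time::Duration;

fn main() {
    let cache: Cache<u32, String> = Cache::builder()
        .max_capacity(2)
        .time_to_live(Duration::from_secs(1))
        .eviction_listener(|key, value, cause| match cause {
            // The entry outlived its time-to-live.
            RemovalCause::Expired => println!("expired: ({:?}, {:?})", key, value),
            // The entry was removed or invalidated explicitly.
            RemovalCause::Explicit => println!("invalidated: ({:?}, {:?})", key, value),
            // Anything else, e.g. replaced by a newer insert or evicted by the size policy.
            other => println!("evicted: ({:?}, {:?}) because {:?}", key, value, other),
        })
        .build();

    cache.insert(1, "one".to_string());
    cache.insert(1, "uno".to_string()); // may notify with cause Replaced
    cache.invalidate(&1); // notifies with cause Explicit
}
```

As the eviction_listener example above shows, pending notifications may only be delivered once the cache performs housekeeping, so a `sync()` call or further cache activity can be needed before they appear.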