uses lazy LRU cache for ReedSolomonCache (#4367)
Lazy LRU is generally faster and allows immutable shared access on the
read path.
behzadnouri authored Jan 13, 2025
1 parent 4f78628 · commit 0f616a1
Showing 1 changed file with 7 additions and 11 deletions.
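
Before the diff, a minimal sketch (not part of this commit) of the read-then-write caching pattern it adopts. DemoCache, its String payload, the format! message, and the capacity argument are hypothetical stand-ins for ReedSolomonCache, Arc<ReedSolomon>, and Self::CAPACITY; the lazy_lru calls (new, get, put) are the ones the diff itself uses, assuming get borrows the cache immutably, as the read-lock usage below implies.

// Hypothetical sketch, not from the repository: the read path takes only a
// shared read lock because lazy_lru's `get` does not need `&mut self`.
use {
    lazy_lru::LruCache,
    std::sync::{Arc, RwLock},
};

struct DemoCache(RwLock<LruCache<(usize, usize), Arc<String>>>);

impl DemoCache {
    fn new(capacity: usize) -> Self {
        Self(RwLock::new(LruCache::new(capacity)))
    }

    fn get(&self, data_shards: usize, parity_shards: usize) -> Arc<String> {
        let key = (data_shards, parity_shards);
        // Fast path: concurrent readers can hold the read guard simultaneously.
        if let Some(entry) = self.0.read().unwrap().get(&key).cloned() {
            return entry;
        }
        // Slow path: build the value with no lock held, then take the write
        // lock only for the insertion.
        let entry = Arc::new(format!("codec {data_shards}:{parity_shards}"));
        self.0.write().unwrap().put(key, Arc::clone(&entry));
        entry
    }
}

In this sketch the value is built outside any lock, so a cache miss never blocks concurrent readers; if two threads race on the same miss, one insertion simply overwrites the other with an equivalent value.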
ledger/src/shredder.rs (18 changes: 7 additions & 11 deletions)
@@ -3,8 +3,8 @@ use {
         self, Error, ProcessShredsStats, Shred, ShredData, ShredFlags, DATA_SHREDS_PER_FEC_BLOCK,
     },
     itertools::Itertools,
+    lazy_lru::LruCache,
     lazy_static::lazy_static,
-    lru::LruCache,
     rayon::{prelude::*, ThreadPool},
     reed_solomon_erasure::{
         galois_8::ReedSolomon,
@@ -17,7 +17,7 @@ use {
     std::{
         borrow::Borrow,
         fmt::Debug,
-        sync::{Arc, Mutex},
+        sync::{Arc, RwLock},
     },
 };

@@ -39,7 +39,7 @@ pub(crate) const ERASURE_BATCH_SIZE: [usize; 33] = [
 ];

 pub struct ReedSolomonCache(
-    Mutex<LruCache<(/*data_shards:*/ usize, /*parity_shards:*/ usize), Arc<ReedSolomon>>>,
+    RwLock<LruCache<(/*data_shards:*/ usize, /*parity_shards:*/ usize), Arc<ReedSolomon>>>,
 );

 #[derive(Debug)]
@@ -424,26 +424,22 @@ impl ReedSolomonCache {
         parity_shards: usize,
     ) -> Result<Arc<ReedSolomon>, reed_solomon_erasure::Error> {
         let key = (data_shards, parity_shards);
-        {
-            let mut cache = self.0.lock().unwrap();
-            if let Some(entry) = cache.get(&key) {
-                return Ok(entry.clone());
-            }
+        if let Some(entry) = self.0.read().unwrap().get(&key).cloned() {
+            return Ok(entry);
         }
         let entry = ReedSolomon::new(data_shards, parity_shards)?;
         let entry = Arc::new(entry);
         {
             let entry = entry.clone();
-            let mut cache = self.0.lock().unwrap();
-            cache.put(key, entry);
+            self.0.write().unwrap().put(key, entry);
         }
         Ok(entry)
     }
 }

 impl Default for ReedSolomonCache {
     fn default() -> Self {
-        Self(Mutex::new(LruCache::new(Self::CAPACITY)))
+        Self(RwLock::new(LruCache::new(Self::CAPACITY)))
     }
 }

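A hedged usage sketch, not from the repository, of the "immutable shared access on the read path" that the commit description mentions: after one write populates an entry, any number of threads can look it up while holding only the read half of the RwLock. The capacity of 64, the String payload, and the (32, 32) key are arbitrary placeholders.

use {
    lazy_lru::LruCache,
    std::{
        sync::{Arc, RwLock},
        thread,
    },
};

fn main() {
    // Cache keyed like ReedSolomonCache: (data_shards, parity_shards).
    let cache = Arc::new(RwLock::new(LruCache::new(64)));
    cache
        .write()
        .unwrap()
        .put((32usize, 32usize), Arc::new(String::from("codec")));
    let readers: Vec<_> = (0..8)
        .map(|_| {
            let cache = Arc::clone(&cache);
            // All readers can hold the shared read guard at the same time;
            // behind a Mutex they would have to take turns.
            thread::spawn(move || cache.read().unwrap().get(&(32usize, 32usize)).cloned())
        })
        .collect();
    for reader in readers {
        assert!(reader.join().unwrap().is_some());
    }
}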
