Skip to content
This repository has been archived by the owner on Jan 22, 2025. It is now read-only.

Commit

Permalink
fix merge conflicts
Browse files — browse the repository at this point in the history
  • Loading branch information
brooksprumo committed Sep 11, 2023
1 parent f2e100c commit d542486
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 81 deletions.
68 changes: 9 additions & 59 deletions runtime/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1396,13 +1396,7 @@ pub struct AccountsDb {
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,

<<<<<<< HEAD:runtime/src/accounts_db.rs
/// Directories for account hash calculations, within base_working_path
full_accounts_hash_cache_path: PathBuf,
incremental_accounts_hash_cache_path: PathBuf,
=======
accounts_hash_cache_path: PathBuf,
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
transient_accounts_hash_cache_path: PathBuf,

// used by tests
Expand Down Expand Up @@ -2374,23 +2368,10 @@ impl AccountsDb {
(accounts_hash_cache_path, None)
} else {
let temp_dir = TempDir::new().expect("new tempdir");
let cache_path = temp_dir.path().join(Self::DEFAULT_ACCOUNTS_HASH_CACHE_DIR);
let cache_path = temp_dir.path().to_path_buf();
(cache_path, Some(temp_dir))
};

<<<<<<< HEAD:runtime/src/accounts_db.rs
=======
let accounts_hash_cache_path = accounts_hash_cache_path.unwrap_or_else(|| {
let accounts_hash_cache_path =
base_working_path.join(Self::DEFAULT_ACCOUNTS_HASH_CACHE_DIR);
if !accounts_hash_cache_path.exists() {
fs_err::create_dir(&accounts_hash_cache_path)
.expect("create accounts hash cache dir");
}
accounts_hash_cache_path
});

>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
let mut bank_hash_stats = HashMap::new();
bank_hash_stats.insert(0, BankHashStats::default());

Expand Down Expand Up @@ -2419,17 +2400,9 @@ impl AccountsDb {
write_cache_limit_bytes: None,
write_version: AtomicU64::new(0),
paths: vec![],
<<<<<<< HEAD:runtime/src/accounts_db.rs
full_accounts_hash_cache_path: accounts_hash_cache_path.join("full"),
incremental_accounts_hash_cache_path: accounts_hash_cache_path.join("incremental"),
transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"),
temp_accounts_hash_cache_path,
=======
base_working_path,
base_working_temp_dir,
transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"),
accounts_hash_cache_path,
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
temp_accounts_hash_cache_path,
shrink_paths: RwLock::new(None),
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
Expand Down Expand Up @@ -7541,7 +7514,7 @@ impl AccountsDb {
fn get_cache_hash_data(
accounts_hash_cache_path: PathBuf,
config: &CalcAccountsHashConfig<'_>,
kind: CalcAccountsHashKind,
flavor: CalcAccountsHashFlavor,
slot: Slot,
) -> CacheHashData {
let accounts_hash_cache_path = if !config.store_detailed_debug_info_on_failure {
Expand All @@ -7554,7 +7527,10 @@ impl AccountsDb {
_ = std::fs::remove_dir_all(&failed_dir);
failed_dir
};
CacheHashData::new(accounts_hash_cache_path, kind == CalcAccountsHashKind::Full)
CacheHashData::new(
accounts_hash_cache_path,
flavor == CalcAccountsHashFlavor::Full,
)
}

// modeled after calculate_accounts_delta_hash
Expand All @@ -7569,12 +7545,7 @@ impl AccountsDb {
config,
storages,
stats,
<<<<<<< HEAD:runtime/src/accounts_db.rs
CalcAccountsHashFlavor::Full,
self.full_accounts_hash_cache_path.clone(),
=======
CalcAccountsHashKind::Full,
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
)?;
let AccountsHashEnum::Full(accounts_hash) = accounts_hash else {
panic!("calculate_accounts_hash_from_storages must return a FullAccountsHash");
Expand All @@ -7601,12 +7572,7 @@ impl AccountsDb {
config,
storages,
stats,
<<<<<<< HEAD:runtime/src/accounts_db.rs
CalcAccountsHashFlavor::Incremental,
self.incremental_accounts_hash_cache_path.clone(),
=======
CalcAccountsHashKind::Incremental,
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
)?;
let AccountsHashEnum::Incremental(incremental_accounts_hash) = accounts_hash else {
panic!("calculate_incremental_accounts_hash must return an IncrementalAccountsHash");
Expand All @@ -7619,15 +7585,8 @@ impl AccountsDb {
config: &CalcAccountsHashConfig<'_>,
storages: &SortedStorages<'_>,
mut stats: HashStats,
<<<<<<< HEAD:runtime/src/accounts_db.rs
flavor: CalcAccountsHashFlavor,
accounts_hash_cache_path: PathBuf,
) -> Result<(AccountsHashEnum, u64), AccountsHashVerificationError> {
=======
kind: CalcAccountsHashKind,
) -> Result<(AccountsHashKind, u64), AccountsHashVerificationError> {
let total_time = Measure::start("");
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
let _guard = self.active_stats.activate(ActiveStatItem::Hash);
stats.oldest_root = storages.range().start;

Expand All @@ -7637,17 +7596,8 @@ impl AccountsDb {
let use_bg_thread_pool = config.use_bg_thread_pool;
let accounts_hash_cache_path = self.accounts_hash_cache_path.clone();
let scan_and_hash = || {
<<<<<<< HEAD:runtime/src/accounts_db.rs
let cache_hash_data = Self::get_cache_hash_data(accounts_hash_cache_path, config, slot);
=======
let (cache_hash_data, cache_hash_data_us) = measure_us!(Self::get_cache_hash_data(
accounts_hash_cache_path,
config,
kind,
slot
));
stats.cache_hash_data_us += cache_hash_data_us;
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/accounts_db.rs
let cache_hash_data =
Self::get_cache_hash_data(accounts_hash_cache_path, config, flavor, slot);

let bounds = Range {
start: 0,
Expand Down
25 changes: 3 additions & 22 deletions runtime/src/cache_hash_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -142,27 +142,17 @@ impl CacheHashDataFile {

pub struct CacheHashData {
cache_dir: PathBuf,
<<<<<<< HEAD:runtime/src/cache_hash_data.rs
pre_existing_cache_files: Arc<Mutex<PreExistingCacheFiles>>,
pub stats: Arc<Mutex<CacheHashDataStats>>,
=======
pre_existing_cache_files: Arc<Mutex<HashSet<PathBuf>>>,
should_delete_old_cache_files_on_drop: bool,
pub stats: Arc<CacheHashDataStats>,
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/cache_hash_data.rs
pub stats: Arc<Mutex<CacheHashDataStats>>,
}

impl Drop for CacheHashData {
fn drop(&mut self) {
<<<<<<< HEAD:runtime/src/cache_hash_data.rs
self.delete_old_cache_files();
self.stats.lock().unwrap().report();
=======
if self.should_delete_old_cache_files_on_drop {
self.delete_old_cache_files();
}
self.stats.report();
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/cache_hash_data.rs
self.stats.lock().unwrap().report();
}
}

Expand All @@ -174,14 +164,9 @@ impl CacheHashData {

let result = CacheHashData {
cache_dir,
<<<<<<< HEAD:runtime/src/cache_hash_data.rs
pre_existing_cache_files: Arc::new(Mutex::new(PreExistingCacheFiles::default())),
stats: Arc::new(Mutex::new(CacheHashDataStats::default())),
=======
pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())),
should_delete_old_cache_files_on_drop,
stats: Arc::default(),
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/cache_hash_data.rs
stats: Arc::new(Mutex::new(CacheHashDataStats::default())),
};

result.get_cache_files();
Expand Down Expand Up @@ -288,10 +273,6 @@ impl CacheHashData {
stats.total_entries = entries;
stats.cache_file_size += capacity as usize;

<<<<<<< HEAD:runtime/src/cache_hash_data.rs
=======
fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef<Path>) {
>>>>>>> 6298c6c31e (Shares accounts hash cache data between full and incremental (#33164)):accounts-db/src/cache_hash_data.rs
self.pre_existing_cache_files
.lock()
.unwrap()
Expand Down

0 comments on commit d542486

Please sign in to comment.