From e04e336ace7e9d50445d27acfff35ff289762395 Mon Sep 17 00:00:00 2001 From: InKryption <59504965+InKryption@users.noreply.github.com> Date: Tue, 17 Dec 2024 21:47:45 +0100 Subject: [PATCH] fix(snapshots,accountsdb): Re-design the `collapse` method (#442) --- src/accountsdb/db.zig | 216 +++++++++++++++++-------------- src/accountsdb/fuzz.zig | 16 ++- src/accountsdb/fuzz_snapshot.zig | 10 +- src/accountsdb/readme.md | 2 +- src/accountsdb/snapshots.zig | 153 +++++++++++++--------- src/bincode/hashmap.zig | 5 +- src/cmd/cmd.zig | 145 +++++++++++---------- src/rpc/server.zig | 31 +++-- 8 files changed, 327 insertions(+), 251 deletions(-) diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index b44f2f327..1372e1687 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -275,25 +275,26 @@ pub const AccountsDB = struct { /// needs to be a thread-safe allocator allocator: std.mem.Allocator, /// Must have been allocated with `self.allocator`. - combined_manifest: *FullAndIncrementalManifest, + full_inc_manifest: FullAndIncrementalManifest, n_threads: u32, validate: bool, accounts_per_file_estimate: u64, should_fastload: bool, save_index: bool, ) !SnapshotManifest { - const snapshot_fields = try combined_manifest.collapse(self.allocator); + const collapsed_manifest = try full_inc_manifest.collapse(self.allocator); + errdefer collapsed_manifest.deinit(self.allocator); if (should_fastload) { var timer = try sig.time.Timer.start(); var fastload_dir = try self.snapshot_dir.makeOpenPath("fastload_state", .{}); defer fastload_dir.close(); self.logger.info().log("fast loading accountsdb..."); - try self.fastload(fastload_dir, snapshot_fields.accounts_db_fields); + try self.fastload(fastload_dir, collapsed_manifest.accounts_db_fields); self.logger.info().logf("loaded from snapshot in {s}", .{timer.read()}); } else { const load_duration = try self.loadFromSnapshot( - snapshot_fields.accounts_db_fields, + collapsed_manifest.accounts_db_fields, n_threads, allocator, accounts_per_file_estimate, @@ -311,15 +312,20 @@ pub const AccountsDB = struct { } if (validate) { - const full_snapshot = combined_manifest.full; + const full_man = full_inc_manifest.full; + const maybe_inc_persistence = if (full_inc_manifest.incremental) |inc| + inc.bank_extra.snapshot_persistence + else + null; + var validate_timer = try sig.time.Timer.start(); try self.validateLoadFromSnapshot(.{ - .full_slot = full_snapshot.bank_fields.slot, + .full_slot = full_man.bank_fields.slot, .expected_full = .{ - .accounts_hash = snapshot_fields.accounts_db_fields.bank_hash_info.accounts_hash, - .capitalization = full_snapshot.bank_fields.capitalization, + .accounts_hash = full_man.accounts_db_fields.bank_hash_info.accounts_hash, + .capitalization = full_man.bank_fields.capitalization, }, - .expected_incremental = if (snapshot_fields.bank_extra.snapshot_persistence) |inc_persistence| .{ + .expected_incremental = if (maybe_inc_persistence) |inc_persistence| .{ .accounts_hash = inc_persistence.incremental_hash, .capitalization = inc_persistence.incremental_capitalization, } else null, @@ -327,7 +333,7 @@ pub const AccountsDB = struct { self.logger.info().logf("validated from snapshot in {s}", .{validate_timer.read()}); } - return snapshot_fields; + return collapsed_manifest; } pub fn fastload( @@ -2672,7 +2678,7 @@ pub const AccountsDB = struct { params.bank_fields.slot = params.target_slot; // ! params.bank_fields.capitalization = full_capitalization; // ! 
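The redesigned `loadWithDefaults` above now takes the `FullAndIncrementalManifest` by value and hands back a collapsed `Manifest` that the caller owns. A minimal sketch of the new caller-side contract, assuming the surrounding `AccountsDB` API (`db` and the literal arguments are illustrative):

    const full_inc_manifest = try FullAndIncrementalManifest.fromFiles(
        allocator,
        logger,
        snapshot_dir,
        snapshot_files,
    );
    defer full_inc_manifest.deinit(allocator);

    // `collapse` now clones rather than mutates, so the combined manifest and
    // the collapsed result have independent lifetimes and can be freed in any order.
    const collapsed_manifest = try db.loadWithDefaults(
        allocator,
        full_inc_manifest,
        1, // n_threads
        true, // validate
        500, // accounts_per_file_estimate
        false, // should_fastload
        false, // save_index
    );
    defer collapsed_manifest.deinit(allocator);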
- const snapshot_fields: SnapshotManifest = .{ + const manifest: SnapshotManifest = .{ .bank_fields = params.bank_fields.*, .accounts_db_fields = .{ .file_map = serializable_file_map, @@ -2703,7 +2709,7 @@ pub const AccountsDB = struct { zstd_write_ctx.writer(), sig.version.CURRENT_CLIENT_VERSION, StatusCache.EMPTY, - &snapshot_fields, + &manifest, file_map, ); try zstd_write_ctx.finish(); @@ -2902,7 +2908,7 @@ pub const AccountsDB = struct { params.bank_fields.slot = params.target_slot; // ! - const snapshot_fields: SnapshotManifest = .{ + const manifest: SnapshotManifest = .{ .bank_fields = params.bank_fields.*, .accounts_db_fields = .{ .file_map = serializable_file_map, @@ -2933,7 +2939,7 @@ pub const AccountsDB = struct { zstd_write_ctx.writer(), sig.version.CURRENT_CLIENT_VERSION, StatusCache.EMPTY, - &snapshot_fields, + &manifest, file_map, ); try zstd_write_ctx.finish(); @@ -3193,7 +3199,7 @@ pub fn indexAndValidateAccountFile( accounts_file.number_of_accounts = number_of_accounts; } -/// All entries in `snapshot_fields.accounts_db_fields.file_map` must correspond to an entry in `file_map`, +/// All entries in `manifest.accounts_db_fields.file_map` must correspond to an entry in `file_map`, /// with the association defined by the file id (a field of the value of the former, the key of the latter). pub fn writeSnapshotTarWithFields( archive_writer: anytype, @@ -3321,26 +3327,26 @@ test "testWriteSnapshot" { const snap_files = try SnapshotFiles.find(allocator, test_data_dir); - var tmp_snap_dir_root = std.testing.tmpDir(.{}); - defer tmp_snap_dir_root.cleanup(); - const tmp_snap_dir = tmp_snap_dir_root.dir; + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const snapshot_dir = tmp_dir_root.dir; { const archive_file = try test_data_dir.openFile(snap_files.full_snapshot.snapshotArchiveName().constSlice(), .{}); defer archive_file.close(); - try parallelUnpackZstdTarBall(allocator, .noop, archive_file, tmp_snap_dir, 4, true); + try parallelUnpackZstdTarBall(allocator, .noop, archive_file, snapshot_dir, 4, true); } if (snap_files.incremental()) |inc_snap| { const archive_file = try test_data_dir.openFile(inc_snap.snapshotArchiveName().constSlice(), .{}); defer archive_file.close(); - try parallelUnpackZstdTarBall(allocator, .noop, archive_file, tmp_snap_dir, 4, false); + try parallelUnpackZstdTarBall(allocator, .noop, archive_file, snapshot_dir, 4, false); } var accounts_db = try AccountsDB.init(.{ .allocator = allocator, .logger = .noop, - .snapshot_dir = tmp_snap_dir, + .snapshot_dir = snapshot_dir, .geyser_writer = null, .gossip_view = null, .index_allocation = .ram, @@ -3428,10 +3434,12 @@ fn loadTestAccountsDB( const snapshot_files = try findAndUnpackTestSnapshots(n_threads, snapshot_dir); - var snapshots = try FullAndIncrementalManifest.fromFiles(allocator, logger, snapshot_dir, snapshot_files); - errdefer snapshots.deinit(allocator); + const full_inc_manifest = + try FullAndIncrementalManifest.fromFiles(allocator, logger, snapshot_dir, snapshot_files); + errdefer full_inc_manifest.deinit(allocator); - const snapshot = try snapshots.collapse(allocator); + const manifest = try full_inc_manifest.collapse(allocator); + defer manifest.deinit(allocator); var accounts_db = try AccountsDB.init(.{ .allocator = allocator, @@ -3446,13 +3454,13 @@ fn loadTestAccountsDB( errdefer accounts_db.deinit(); _ = try accounts_db.loadFromSnapshot( - snapshot.accounts_db_fields, + manifest.accounts_db_fields, n_threads, allocator, 500, ); - return .{ accounts_db, snapshots 
}; + return .{ accounts_db, full_inc_manifest }; } // NOTE: this is a memory leak test - geyser correctness is tested in the geyser tests @@ -3462,11 +3470,13 @@ test "geyser stream on load" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; - const snapshot_files = try findAndUnpackTestSnapshots(2, snapdir); + const snapshot_dir = tmp_dir_root.dir; + + const snapshot_files = try findAndUnpackTestSnapshots(2, snapshot_dir); - var snapshots = try FullAndIncrementalManifest.fromFiles(allocator, logger, snapdir, snapshot_files); - errdefer snapshots.deinit(allocator); + const full_inc_manifest = + try FullAndIncrementalManifest.fromFiles(allocator, logger, snapshot_dir, snapshot_files); + defer full_inc_manifest.deinit(allocator); var geyser_exit = std.atomic.Value(bool).init(false); @@ -3497,13 +3507,13 @@ test "geyser stream on load" { defer geyser_exit.store(true, .release); - const snapshot = try snapshots.collapse(allocator); - defer snapshots.deinit(allocator); + const snapshot = try full_inc_manifest.collapse(allocator); + defer snapshot.deinit(allocator); var accounts_db = try AccountsDB.init(.{ .allocator = allocator, .logger = logger, - .snapshot_dir = snapdir, + .snapshot_dir = snapshot_dir, .geyser_writer = geyser_writer, .gossip_view = null, .index_allocation = .ram, @@ -3525,13 +3535,12 @@ test "write and read an account" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; + const snapshot_dir = tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 1, .noop, snapdir); - defer { - accounts_db.deinit(); - snapshots.deinit(allocator); - } + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 1, .noop, snapshot_dir); + defer accounts_db.deinit(); + defer full_inc_manifest.deinit(allocator); var prng = std.rand.DefaultPrng.init(0); const pubkey = Pubkey.initRandom(prng.random()); @@ -3566,21 +3575,22 @@ test "load and validate from test snapshot" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; + const snapshot_dir = tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 1, .noop, snapdir); + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 1, .noop, snapshot_dir); defer { accounts_db.deinit(); - snapshots.deinit(allocator); + full_inc_manifest.deinit(allocator); } try accounts_db.validateLoadFromSnapshot(.{ - .full_slot = snapshots.full.bank_fields.slot, + .full_slot = full_inc_manifest.full.bank_fields.slot, .expected_full = .{ - .accounts_hash = snapshots.full.accounts_db_fields.bank_hash_info.accounts_hash, - .capitalization = snapshots.full.bank_fields.capitalization, + .accounts_hash = full_inc_manifest.full.accounts_db_fields.bank_hash_info.accounts_hash, + .capitalization = full_inc_manifest.full.bank_fields.capitalization, }, - .expected_incremental = if (snapshots.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ + .expected_incremental = if (full_inc_manifest.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ .accounts_hash = inc_persistence.incremental_hash, .capitalization = inc_persistence.incremental_capitalization, } else null, @@ -3592,21 +3602,22 @@ test "load and validate from test snapshot using disk index" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = 
tmp_dir_root.dir; + const snapshot_dir = tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 1, .noop, snapdir); + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 1, .noop, snapshot_dir); defer { accounts_db.deinit(); - snapshots.deinit(allocator); + full_inc_manifest.deinit(allocator); } try accounts_db.validateLoadFromSnapshot(.{ - .full_slot = snapshots.full.bank_fields.slot, + .full_slot = full_inc_manifest.full.bank_fields.slot, .expected_full = .{ - .accounts_hash = snapshots.full.accounts_db_fields.bank_hash_info.accounts_hash, - .capitalization = snapshots.full.bank_fields.capitalization, + .accounts_hash = full_inc_manifest.full.accounts_db_fields.bank_hash_info.accounts_hash, + .capitalization = full_inc_manifest.full.bank_fields.capitalization, }, - .expected_incremental = if (snapshots.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ + .expected_incremental = if (full_inc_manifest.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ .accounts_hash = inc_persistence.incremental_hash, .capitalization = inc_persistence.incremental_capitalization, } else null, @@ -3618,21 +3629,22 @@ test "load and validate from test snapshot parallel" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; + const snapshot_dir = tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 2, .noop, snapdir); + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 2, .noop, snapshot_dir); defer { accounts_db.deinit(); - snapshots.deinit(allocator); + full_inc_manifest.deinit(allocator); } try accounts_db.validateLoadFromSnapshot(.{ - .full_slot = snapshots.full.bank_fields.slot, + .full_slot = full_inc_manifest.full.bank_fields.slot, .expected_full = .{ - .accounts_hash = snapshots.full.accounts_db_fields.bank_hash_info.accounts_hash, - .capitalization = snapshots.full.bank_fields.capitalization, + .accounts_hash = full_inc_manifest.full.accounts_db_fields.bank_hash_info.accounts_hash, + .capitalization = full_inc_manifest.full.bank_fields.capitalization, }, - .expected_incremental = if (snapshots.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ + .expected_incremental = if (full_inc_manifest.incremental.?.bank_extra.snapshot_persistence) |inc_persistence| .{ .accounts_hash = inc_persistence.incremental_hash, .capitalization = inc_persistence.incremental_capitalization, } else null, @@ -3644,16 +3656,17 @@ test "load clock sysvar" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; + const snapshot_dir = tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 1, .noop, snapdir); + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 1, .noop, snapshot_dir); defer { accounts_db.deinit(); - snapshots.deinit(allocator); + full_inc_manifest.deinit(allocator); } - const full = snapshots.full; - const inc = snapshots.incremental; + const full = full_inc_manifest.full; + const inc = full_inc_manifest.incremental; const expected_clock: sysvars.Clock = .{ .slot = (inc orelse full).bank_fields.slot, .epoch_start_timestamp = 1733349736, @@ -3677,12 +3690,13 @@ test "load other sysvars" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; + const snapshot_dir = 
tmp_dir_root.dir; - var accounts_db, var snapshots = try loadTestAccountsDB(allocator, false, 1, .noop, snapdir); + var accounts_db, const full_inc_manifest = + try loadTestAccountsDB(allocator, false, 1, .noop, snapshot_dir); defer { accounts_db.deinit(); - snapshots.deinit(allocator); + full_inc_manifest.deinit(allocator); } const SlotAndHash = sig.accounts_db.snapshots.SlotAndHash; @@ -4258,16 +4272,13 @@ test "generate snapshot & update gossip snapshot hashes" { var tmp_dir_root = std.testing.tmpDir(.{}); defer tmp_dir_root.cleanup(); - const snapdir = tmp_dir_root.dir; - const snap_files = try findAndUnpackTestSnapshots(1, snapdir); + const snapshot_dir = tmp_dir_root.dir; - var all_snapshot_fields = try FullAndIncrementalManifest.fromFiles( - allocator, - .noop, - snapdir, - snap_files, - ); - defer all_snapshot_fields.deinit(allocator); + const snap_files = try findAndUnpackTestSnapshots(1, snapshot_dir); + + const full_inc_manifest = + try FullAndIncrementalManifest.fromFiles(allocator, .noop, snapshot_dir, snap_files); + defer full_inc_manifest.deinit(allocator); // mock gossip service const Queue = std.ArrayList(sig.gossip.GossipData); @@ -4278,7 +4289,7 @@ test "generate snapshot & update gossip snapshot hashes" { var accounts_db = try AccountsDB.init(.{ .allocator = allocator, .logger = .noop, - .snapshot_dir = snapdir, + .snapshot_dir = snapshot_dir, .gossip_view = .{ .my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key), .push_msg_queue = &push_msg_queue_mux, @@ -4290,25 +4301,37 @@ test "generate snapshot & update gossip snapshot hashes" { }); defer accounts_db.deinit(); - // pretend `all_snapshot_fields`/`snap_files` refers to `tmp_snap_dir`, even though the archive file isn't actually in there, just the unpacked contents. - // TODO: this is not nice, make sure the API for loading from archives outside of the snapshot dir is improved. 
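The load below discards its returned collapsed manifest immediately, since the test only needs the side effect of populating the database; a sketch of the equivalent expanded form, using the names already in scope in this test (`collapsed` is an illustrative name):

    const collapsed = try accounts_db.loadWithDefaults(
        allocator,
        full_inc_manifest,
        1, // n_threads
        true, // validate
        300, // accounts_per_file_estimate
        false, // should_fastload
        false, // save_index
    );
    // `full_inc_manifest` is still consulted afterwards for expected values,
    // so only the collapsed result is released here.
    collapsed.deinit(allocator);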
- _ = try accounts_db.loadWithDefaults(allocator, &all_snapshot_fields, 1, true, 300, false, false); + (try accounts_db.loadWithDefaults( + allocator, + full_inc_manifest, + 1, + true, + 300, + false, + false, + )).deinit(allocator); var bank_fields = try BankFields.initRandom(allocator, random, 128); defer bank_fields.deinit(allocator); - const full_slot = all_snapshot_fields.full.accounts_db_fields.slot; + const full_slot = full_inc_manifest.full.accounts_db_fields.slot; const full_gen_result = try accounts_db.generateFullSnapshot(.{ .target_slot = full_slot, .bank_fields = &bank_fields, .lamports_per_signature = random.int(u64), .old_snapshot_action = .ignore_old, // make sure we don't delete anything in `sig.TEST_DATA_DIR` - .deprecated_stored_meta_write_version = all_snapshot_fields.full.accounts_db_fields.stored_meta_write_version, + .deprecated_stored_meta_write_version = full_inc_manifest.full.accounts_db_fields.stored_meta_write_version, }); const full_hash = full_gen_result.hash; - try std.testing.expectEqual(all_snapshot_fields.full.accounts_db_fields.bank_hash_info.accounts_hash, full_gen_result.hash); - try std.testing.expectEqual(all_snapshot_fields.full.bank_fields.capitalization, full_gen_result.capitalization); + try std.testing.expectEqual( + full_inc_manifest.full.accounts_db_fields.bank_hash_info.accounts_hash, + full_gen_result.hash, + ); + try std.testing.expectEqual( + full_inc_manifest.full.bank_fields.capitalization, + full_gen_result.capitalization, + ); { const queue, var queue_lg = push_msg_queue_mux.readWithLock(); @@ -4329,18 +4352,19 @@ test "generate snapshot & update gossip snapshot hashes" { ); } - if (all_snapshot_fields.incremental) |inc_snapshot_fields| { - const inc_slot = inc_snapshot_fields.accounts_db_fields.slot; + if (full_inc_manifest.incremental) |inc_manifest| { + const inc_slot = inc_manifest.accounts_db_fields.slot; const inc_gen_result = try accounts_db.generateIncrementalSnapshot(.{ .target_slot = inc_slot, .bank_fields = &bank_fields, .lamports_per_signature = random.int(u64), .old_snapshot_action = .ignore_old, // make sure we don't delete anything in `sig.TEST_DATA_DIR` - .deprecated_stored_meta_write_version = all_snapshot_fields.incremental.?.accounts_db_fields.stored_meta_write_version, + .deprecated_stored_meta_write_version = inc_manifest + .accounts_db_fields.stored_meta_write_version, }); const inc_hash = inc_gen_result.incremental_hash; - try std.testing.expectEqual(inc_snapshot_fields.bank_extra.snapshot_persistence, inc_gen_result); + try std.testing.expectEqual(inc_manifest.bank_extra.snapshot_persistence, inc_gen_result); try std.testing.expectEqual(full_slot, inc_gen_result.full_slot); try std.testing.expectEqual(full_gen_result.hash, inc_gen_result.full_hash); try std.testing.expectEqual(full_gen_result.capitalization, inc_gen_result.full_capitalization); @@ -4434,9 +4458,9 @@ pub const BenchmarkAccountsDBSnapshotLoad = struct { } else return error.SnapshotMissingAccountsDir; defer accounts_dir.close(); - var snapshots = try FullAndIncrementalManifest.fromFiles(allocator, logger, snapshot_dir, snapshot_files); - defer snapshots.deinit(allocator); - const snapshot = try snapshots.collapse(allocator); + const full_inc_manifest = try FullAndIncrementalManifest.fromFiles(allocator, logger, snapshot_dir, snapshot_files); + defer full_inc_manifest.deinit(allocator); + const collapsed_manifest = try full_inc_manifest.collapse(allocator); var accounts_db = try AccountsDB.init(.{ .allocator = allocator, @@ -4451,21 +4475,21 @@ pub 
const BenchmarkAccountsDBSnapshotLoad = struct { defer accounts_db.deinit(); const loading_duration = try accounts_db.loadFromSnapshot( - snapshot.accounts_db_fields, + collapsed_manifest.accounts_db_fields, bench_args.n_threads, allocator, 500, ); - const full_snapshot = snapshots.full; + const full_snapshot = full_inc_manifest.full; var validate_timer = try sig.time.Timer.start(); try accounts_db.validateLoadFromSnapshot(.{ .full_slot = full_snapshot.bank_fields.slot, .expected_full = .{ - .accounts_hash = snapshot.accounts_db_fields.bank_hash_info.accounts_hash, + .accounts_hash = collapsed_manifest.accounts_db_fields.bank_hash_info.accounts_hash, .capitalization = full_snapshot.bank_fields.capitalization, }, - .expected_incremental = if (snapshot.bank_extra.snapshot_persistence) |inc_persistence| .{ + .expected_incremental = if (collapsed_manifest.bank_extra.snapshot_persistence) |inc_persistence| .{ .accounts_hash = inc_persistence.incremental_hash, .capitalization = inc_persistence.incremental_capitalization, } else null, diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index d32553079..150206f66 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -303,13 +303,13 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { maybe_incremental_file_info, ); - var snapshot_fields = try sig.accounts_db.FullAndIncrementalManifest.fromFiles( + const combined_manifest = try sig.accounts_db.FullAndIncrementalManifest.fromFiles( allocator, logger, alternative_snapshot_dir, snapshot_files, ); - defer snapshot_fields.deinit(allocator); + defer combined_manifest.deinit(allocator); var alt_accounts_db = try AccountsDB.init(.{ .allocator = allocator, @@ -323,17 +323,21 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { }); defer alt_accounts_db.deinit(); - _ = try alt_accounts_db.loadWithDefaults( + (try alt_accounts_db.loadWithDefaults( allocator, - &snapshot_fields, + combined_manifest, 1, true, N_ACCOUNTS_PER_SLOT, false, false, - ); + )).deinit(allocator); + const maybe_inc_slot = if (snapshot_info.inc) |inc| inc.slot else null; - logger.info().logf("loaded and validated snapshot at slot: {} (and inc snapshot @ slot {any})", .{ full_snapshot_info.slot, maybe_inc_slot }); + logger.info().logf( + "loaded and validated snapshot at slot: {} (and inc snapshot @ slot {any})", + .{ full_snapshot_info.slot, maybe_inc_slot }, + ); } } diff --git a/src/accountsdb/fuzz_snapshot.zig b/src/accountsdb/fuzz_snapshot.zig index 84b1b8720..9a4183dba 100644 --- a/src/accountsdb/fuzz_snapshot.zig +++ b/src/accountsdb/fuzz_snapshot.zig @@ -44,13 +44,13 @@ pub fn run(args: *std.process.ArgIterator) !void { while (timer.read() < MAX_FUZZ_TIME_NS) : (i += 1) { bytes_buffer.clearRetainingCapacity(); - const snapshot_original: SnapshotManifest = try randomSnapshotFields(allocator, random); - defer snapshot_original.deinit(allocator); + const manifest_original: SnapshotManifest = try randomSnapshotManifest(allocator, random); + defer manifest_original.deinit(allocator); - try bytes_buffer.ensureUnusedCapacity(bincode.sizeOf(snapshot_original, .{}) * 2); + try bytes_buffer.ensureUnusedCapacity(bincode.sizeOf(manifest_original, .{}) * 2); const original_bytes_start = bytes_buffer.items.len; - try bincode.write(bytes_buffer.writer(), snapshot_original, .{}); + try bincode.write(bytes_buffer.writer(), manifest_original, .{}); const original_bytes_end = bytes_buffer.items.len; const snapshot_deserialized = try bincode.readFromSlice(allocator, SnapshotManifest, 
bytes_buffer.items[original_bytes_start..original_bytes_end], .{}); @@ -69,7 +69,7 @@ pub fn run(args: *std.process.ArgIterator) !void { const max_list_entries = 1 << 8; -fn randomSnapshotFields( +fn randomSnapshotManifest( allocator: std.mem.Allocator, /// Should be a PRNG, not a true RNG. See the documentation on `std.Random.uintLessThan` /// for commentary on the runtime of this function. diff --git a/src/accountsdb/readme.md b/src/accountsdb/readme.md index 0905b3f27..3c6471198 100644 --- a/src/accountsdb/readme.md +++ b/src/accountsdb/readme.md @@ -331,7 +331,7 @@ The core logic for generating a snapshot lives in `accounts_db.db.writeSnapshotT The procedure consists of writing the version file, the status cache (`snapshots/status_cache`) file, the snapshot manifest (`snapshots/{SLOT}/{SLOT}`), and the account files (`accounts/{SLOT}.{FILE_ID}`). This is all written to a stream in the TAR archive format. -The snapshot manifest file content is comprised of the bincoded (bincode-encoded) data structure `SnapshotFields`, which is an aggregate of: +The snapshot manifest file content consists of the bincoded (bincode-encoded) data structure `Manifest`, which is an aggregate of: * implicit state: data derived from the current state of AccountsDB, like the file map for all the accounts which exist at that snapshot, or which have changed relative to a full snapshot in an incremental one * configuration state: data that is used to communicate details about the snapshot, like the full slot to which an incremental snapshot is relative. diff --git a/src/accountsdb/snapshots.zig b/src/accountsdb/snapshots.zig index 94ff059ec..b6dc4bd70 100644 --- a/src/accountsdb/snapshots.zig +++ b/src/accountsdb/snapshots.zig @@ -2599,7 +2599,6 @@ pub const SnapshotFiles = struct { pub const FullAndIncrementalManifest = struct { full: Manifest, incremental: ?Manifest, - was_collapsed: bool = false, // used for deinit() pub fn fromFiles( allocator: std.mem.Allocator, @@ -2644,78 +2643,104 @@ pub const FullAndIncrementalManifest = struct { }; } - /// collapse all full and incremental snapshots into one. - /// note: this works by stack copying the full snapshot and combining - /// the accounts-db account file map. - /// this will 1) modify the incremental snapshot account map - /// and 2) the returned snapshot heap fields will still point to the incremental snapshot - /// (so be sure not to deinit it while still using the returned snapshot) + pub const CollapseError = error{ + /// There are storages for the same slot in both the full and incremental snapshot. + SnapshotSlotOverlap, + }; + + /// Like `collapseIfNecessary`, but returns a clone of the full snapshot + /// manifest if there is no incremental update to apply. + /// The caller is responsible for `.deinit`ing the result with `allocator`. pub fn collapse( - self: *FullAndIncrementalManifest, /// Should be the same allocator passed to `fromFiles`, or otherwise to allocate `Self`.
+ self: FullAndIncrementalManifest, allocator: std.mem.Allocator, - ) !Manifest { - // nothing to collapse - if (self.incremental == null) - return self.full; - self.was_collapsed = true; - - // collapse bank fields into the - // incremental =pushed into=> full - var snapshot = self.incremental.?; // stack copy - const full_slot = self.full.bank_fields.slot; - - // collapse accounts-db fields - const storages_map = &self.incremental.?.accounts_db_fields.file_map; - - // TODO: use a better allocator - var slots_to_remove = std.ArrayList(Slot).init(allocator); - defer slots_to_remove.deinit(); - - // make sure theres no overlap in slots between full and incremental and combine - var storages_entry_iter = storages_map.iterator(); - while (storages_entry_iter.next()) |incremental_entry| { - const slot = incremental_entry.key_ptr.*; - - // only keep slots > full snapshot slot - if (!(slot > full_slot)) { - try slots_to_remove.append(slot); - continue; - } + ) (std.mem.Allocator.Error || CollapseError)!Manifest { + const maybe_collapsed = try self.collapseIfNecessary(allocator); + return maybe_collapsed orelse try self.full.clone(allocator); + } + + /// Returns null if there is no incremental snapshot manifest; otherwise + /// returns the result of overlaying the updates of the incremental + /// onto the full snapshot manifest. + /// The caller is responsible for `.deinit`ing the result with `allocator` + /// if it is non-null. + pub fn collapseIfNecessary( + self: FullAndIncrementalManifest, + allocator: std.mem.Allocator, + ) (std.mem.Allocator.Error || CollapseError)!?Manifest { + const full = self.full; + const incremental = self.incremental orelse return null; - const slot_entry = try self.full.accounts_db_fields.file_map.getOrPut(allocator, slot); - if (slot_entry.found_existing) { - std.debug.panic("invalid incremental snapshot: slot {d} is in both full and incremental snapshots\n", .{slot}); - } else { - slot_entry.value_ptr.* = incremental_entry.value_ptr.*; - } - } + // make a heap clone of the incremental manifest's more up-to-date + // data, except with the file map of the full manifest, which is + // likely to contain a larger amount of entries; can then overlay + // the relevant entries from the incremental manifest onto the + // clone of the full manifest. 
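// For example: given a full manifest at slot 100 whose file map covers
// slots {90, 95, 100}, and an incremental manifest whose file map covers
// slots {100, 105, 110} (slot numbers illustrative), the collapsed file
// map starts from the full map's {90, 95, 100}, skips incremental slot
// 100 (not newer than the full slot), and overlays {105, 110}. A slot
// above the full slot that appears in both maps fails with
// `error.SnapshotSlotOverlap`.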
- for (slots_to_remove.items) |slot| { - _ = storages_map.swapRemove(slot); - } + var collapsed = incremental; + collapsed.accounts_db_fields.file_map = full.accounts_db_fields.file_map; + + collapsed = try collapsed.clone(allocator); + errdefer collapsed.deinit(allocator); + + const collapsed_file_map = &collapsed.accounts_db_fields.file_map; + try collapsed_file_map.ensureUnusedCapacity( + allocator, + incremental.accounts_db_fields.file_map.count(), + ); - snapshot.accounts_db_fields = self.full.accounts_db_fields; + const inc_file_map = &incremental.accounts_db_fields.file_map; + for (inc_file_map.keys(), inc_file_map.values()) |slot, account_file_info| { + if (slot <= full.accounts_db_fields.slot) continue; + const gop = collapsed_file_map.getOrPutAssumeCapacity(slot); + if (gop.found_existing) return error.SnapshotSlotOverlap; + gop.value_ptr.* = account_file_info; + } - return snapshot; + return collapsed; } - pub fn deinit(self: *FullAndIncrementalManifest, allocator: std.mem.Allocator) void { + pub fn deinit(self: FullAndIncrementalManifest, allocator: std.mem.Allocator) void { self.full.deinit(allocator); - if (self.incremental) |*inc| { - if (!self.was_collapsed) { - inc.deinit(allocator); - } else { - inc.accounts_db_fields.file_map.deinit(allocator); - inc.bank_fields.deinit(allocator); - allocator.free(inc.accounts_db_fields.rooted_slots); - allocator.free(inc.accounts_db_fields.rooted_slot_hashes); - inc.bank_extra.deinit(allocator); - } + if (self.incremental) |inc| { + inc.deinit(allocator); } } }; +test "checkAllAllocationFailures FullAndIncrementalManifest" { + const local = struct { + fn parseFiles( + allocator: std.mem.Allocator, + snapdir: std.fs.Dir, + snapshot_files: SnapshotFiles, + ) !void { + const combined_manifest = try FullAndIncrementalManifest.fromFiles( + allocator, + .noop, + snapdir, + snapshot_files, + ); + defer combined_manifest.deinit(allocator); + + const collapsed_manifest = try combined_manifest.collapse(allocator); + defer collapsed_manifest.deinit(allocator); + } + }; + + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const snapdir = tmp_dir_root.dir; + + const snapshot_files = try sig.accounts_db.db.findAndUnpackTestSnapshots(1, snapdir); + + try std.testing.checkAllAllocationFailures( + std.testing.allocator, + local.parseFiles, + .{ snapdir, snapshot_files }, + ); +} + pub const generate = struct { /// Writes the version, status cache, and manifest files. /// Should call this first to begin generating the snapshot archive. 
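The `checkAllAllocationFailures` test above leans on `std.testing.checkAllAllocationFailures`, which first runs the function to count its allocations, then re-runs it once per allocation index with an injected `error.OutOfMemory`, reporting any leak or unexpected error; the value-semantics `deinit` introduced in this patch is what makes every failure path freeable. A minimal self-contained sketch of the same pattern (`parseTwoBuffers` is an illustrative stand-in for `parseFiles`):

    const std = @import("std");

    fn parseTwoBuffers(allocator: std.mem.Allocator) !void {
        const a = try allocator.alloc(u8, 32);
        defer allocator.free(a);
        // If this second allocation is the one made to fail, the deferred
        // free above must still release `a`, or a leak is reported.
        const b = try allocator.alloc(u8, 64);
        defer allocator.free(b);
    }

    test "leak-free under injected allocation failures" {
        try std.testing.checkAllAllocationFailures(
            std.testing.allocator,
            parseTwoBuffers,
            .{},
        );
    }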
@@ -2874,8 +2899,8 @@ test "parse snapshot fields" { const full_manifest_file = try snapdir.openFile(full_manifest_path, .{}); defer full_manifest_file.close(); - const snapshot_fields_full = try Manifest.readFromFile(allocator, full_manifest_file); - defer snapshot_fields_full.deinit(allocator); + const full_manifest = try Manifest.readFromFile(allocator, full_manifest_file); + defer full_manifest.deinit(allocator); if (snapshot_files.incremental_info) |inc| { const inc_slot = inc.slot; @@ -2885,7 +2910,7 @@ test "parse snapshot fields" { const inc_manifest_file = try snapdir.openFile(inc_manifest_path, .{}); defer inc_manifest_file.close(); - const snapshot_fields_inc = try Manifest.readFromFile(allocator, inc_manifest_file); - defer snapshot_fields_inc.deinit(allocator); + const inc_manifest = try Manifest.readFromFile(allocator, inc_manifest_file); + defer inc_manifest.deinit(allocator); } } diff --git a/src/bincode/hashmap.zig b/src/bincode/hashmap.zig index e335c9f1a..db81cf543 100644 --- a/src/bincode/hashmap.zig +++ b/src/bincode/hashmap.zig @@ -101,11 +101,10 @@ pub fn readCtx( const key = try ctx_impl.readKey(allocator, reader, params); errdefer ctx_impl.freeKey(allocator, key); - const gop = hash_map.getOrPutAssumeCapacity(key); - if (gop.found_existing) return error.DuplicateFileMapEntry; + if (hash_map.contains(key)) return error.DuplicateFileMapEntry; const value = try ctx_impl.readValue(allocator, reader, params); - gop.value_ptr.* = value; + hash_map.putAssumeCapacityNoClobber(key, value); } return switch (hm_info.management) { diff --git a/src/cmd/cmd.zig b/src/cmd/cmd.zig index 43a87e7ee..f238ea439 100644 --- a/src/cmd/cmd.zig +++ b/src/cmd/cmd.zig @@ -750,19 +750,21 @@ fn validator() !void { } // snapshot - const snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ + var loaded_snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ .gossip_service = gossip_service, .geyser_writer = geyser_writer, .validate_snapshot = true, }); + defer loaded_snapshot.deinit(); // leader schedule - var leader_schedule_cache = LeaderScheduleCache.init(allocator, snapshot.bank.bank_fields.epoch_schedule); + var leader_schedule_cache = LeaderScheduleCache.init(allocator, loaded_snapshot.collapsed_manifest.bank_fields.epoch_schedule); if (try getLeaderScheduleFromCli(allocator)) |leader_schedule| { - try leader_schedule_cache.put(snapshot.bank.bank_fields.epoch, leader_schedule[1]); + try leader_schedule_cache.put(loaded_snapshot.collapsed_manifest.bank_fields.epoch, leader_schedule[1]); } else { - const schedule = try snapshot.bank.bank_fields.leaderSchedule(allocator); - try leader_schedule_cache.put(snapshot.bank.bank_fields.epoch, schedule); + const schedule = try loaded_snapshot.collapsed_manifest.bank_fields.leaderSchedule(allocator); + errdefer schedule.deinit(); + try leader_schedule_cache.put(loaded_snapshot.collapsed_manifest.bank_fields.epoch, schedule); } // This provider will fail at epoch boundary unless another thread updated the leader schedule cache // i.e. 
called leader_schedule_cache.getSlotLeaderMaybeCompute(slot, bank_fields); @@ -817,7 +819,7 @@ fn validator() !void { // shred networking const my_contact_info = sig.gossip.data.ThreadSafeContactInfo.fromContactInfo(gossip_service.my_contact_info); var shred_col_conf = config.current.shred_network; - shred_col_conf.start_slot = shred_col_conf.start_slot orelse snapshot.bank.bank_fields.slot; + shred_col_conf.start_slot = shred_col_conf.start_slot orelse loaded_snapshot.collapsed_manifest.bank_fields.slot; var shred_network_manager = try sig.shred_network.start( shred_col_conf, ShredCollectorDependencies{ @@ -835,7 +837,7 @@ fn validator() !void { .n_retransmit_threads = config.current.turbine.num_retransmit_threads, .overwrite_turbine_stake_for_testing = config.current.turbine.overwrite_stake_for_testing, .leader_schedule_cache = &leader_schedule_cache, - .bank_fields = snapshot.bank.bank_fields, + .bank_fields = &loaded_snapshot.collapsed_manifest.bank_fields, }, ); defer shred_network_manager.deinit(); @@ -865,20 +867,22 @@ fn shredCollector() !void { allocator.destroy(gossip_service); } - const snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ + var loaded_snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ .gossip_service = gossip_service, .geyser_writer = null, .validate_snapshot = true, .metadata_only = config.current.accounts_db.snapshot_metadata_only, }); + defer loaded_snapshot.deinit(); // leader schedule - var leader_schedule_cache = LeaderScheduleCache.init(allocator, snapshot.bank.bank_fields.epoch_schedule); + var leader_schedule_cache = LeaderScheduleCache.init(allocator, loaded_snapshot.collapsed_manifest.bank_fields.epoch_schedule); if (try getLeaderScheduleFromCli(allocator)) |leader_schedule| { - try leader_schedule_cache.put(snapshot.bank.bank_fields.epoch, leader_schedule[1]); + try leader_schedule_cache.put(loaded_snapshot.collapsed_manifest.bank_fields.epoch, leader_schedule[1]); } else { - const schedule = try snapshot.bank.bank_fields.leaderSchedule(allocator); - try leader_schedule_cache.put(snapshot.bank.bank_fields.epoch, schedule); + const schedule = try loaded_snapshot.collapsed_manifest.bank_fields.leaderSchedule(allocator); + errdefer schedule.deinit(); + try leader_schedule_cache.put(loaded_snapshot.collapsed_manifest.bank_fields.epoch, schedule); } // This provider will fail at epoch boundary unless another thread updated the leader schedule cache // i.e. 
called leader_schedule_cache.getSlotLeaderMaybeCompute(slot, bank_fields); @@ -951,7 +955,7 @@ fn shredCollector() !void { .n_retransmit_threads = config.current.turbine.num_retransmit_threads, .overwrite_turbine_stake_for_testing = config.current.turbine.overwrite_stake_for_testing, .leader_schedule_cache = &leader_schedule_cache, - .bank_fields = snapshot.bank.bank_fields, + .bank_fields = &loaded_snapshot.collapsed_manifest.bank_fields, }, ); defer shred_network_manager.deinit(); @@ -1000,16 +1004,16 @@ fn createSnapshot() !void { var snapshot_dir = try std.fs.cwd().makeOpenPath(snapshot_dir_str, .{}); defer snapshot_dir.close(); - const snapshot_result = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ + var loaded_snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ .gossip_service = null, .geyser_writer = null, .validate_snapshot = false, .metadata_only = false, }); - defer snapshot_result.deinit(); + defer loaded_snapshot.deinit(); - var accounts_db = snapshot_result.accounts_db; - const slot = snapshot_result.snapshot_fields.full.bank_fields.slot; + var accounts_db = loaded_snapshot.accounts_db; + const slot = loaded_snapshot.combined_manifest.full.bank_fields.slot; var n_accounts_indexed: u64 = 0; for (accounts_db.account_index.pubkey_ref_map.shards) |*shard_rw| { @@ -1026,7 +1030,7 @@ fn createSnapshot() !void { app_base.logger.info().logf("accountsdb[manager]: generating full snapshot for slot {d}", .{slot}); _ = try accounts_db.generateFullSnapshot(.{ .target_slot = slot, - .bank_fields = &snapshot_result.snapshot_fields.full.bank_fields, + .bank_fields = &loaded_snapshot.combined_manifest.full.bank_fields, .lamports_per_signature = lps: { var prng = std.Random.DefaultPrng.init(1234); break :lps prng.random().int(u64); @@ -1061,13 +1065,13 @@ fn validateSnapshot() !void { } } - const snapshot_result = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ + var loaded_snapshot = try loadSnapshot(allocator, app_base.logger.unscoped(), .{ .gossip_service = null, .geyser_writer = geyser_writer, .validate_snapshot = true, .metadata_only = false, }); - defer snapshot_result.deinit(); + defer loaded_snapshot.deinit(); } /// entrypoint to print the leader schedule and then exit @@ -1081,7 +1085,8 @@ fn printLeaderSchedule() !void { const start_slot, const leader_schedule = try getLeaderScheduleFromCli(allocator) orelse b: { app_base.logger.info().log("Downloading a snapshot to calculate the leader schedule."); - const loaded_snapshot = loadSnapshot(allocator, app_base.logger.unscoped(), .{ + + var loaded_snapshot = loadSnapshot(allocator, app_base.logger.unscoped(), .{ .gossip_service = null, .geyser_writer = null, .validate_snapshot = true, @@ -1092,12 +1097,12 @@ fn printLeaderSchedule() !void { \\\ No snapshot found and no gossip service to download a snapshot from. \\\ Download using the `snapshot-download` command. 
); - return err; - } else { - return err; } + return err; }; - const bank_fields = loaded_snapshot.bank.bank_fields; + defer loaded_snapshot.deinit(); + + const bank_fields = &loaded_snapshot.collapsed_manifest.bank_fields; _, const slot_index = bank_fields.epoch_schedule.getEpochAndSlotIndex(bank_fields.slot); break :b .{ bank_fields.slot - slot_index, @@ -1318,22 +1323,19 @@ fn spawnLogger(allocator: std.mem.Allocator) !Logger { const LoadedSnapshot = struct { allocator: Allocator, accounts_db: AccountsDB, - status_cache: sig.accounts_db.snapshots.StatusCache, - snapshot_fields: sig.accounts_db.snapshots.FullAndIncrementalManifest, - /// contains pointers to `accounts_db` and `snapshot_fields` - bank: Bank, + combined_manifest: sig.accounts_db.snapshots.FullAndIncrementalManifest, + collapsed_manifest: sig.accounts_db.snapshots.Manifest, genesis_config: GenesisConfig, - // Snapshot resulting from collapse needs to be retained here for - // valid lifetime as it is used by bank. This was a quick fix, a minor - // refactor is probably not a bad idea. - collapsed_snapshot_fields: sig.accounts_db.snapshots.Manifest, + status_cache: ?sig.accounts_db.snapshots.StatusCache, pub fn deinit(self: *@This()) void { - self.genesis_config.deinit(self.allocator); - self.status_cache.deinit(self.allocator); - self.snapshot_fields.deinit(self.allocator); self.accounts_db.deinit(); - self.allocator.destroy(self); + self.combined_manifest.deinit(self.allocator); + self.collapsed_manifest.deinit(self.allocator); + self.genesis_config.deinit(self.allocator); + if (self.status_cache) |status_cache| { + status_cache.deinit(self.allocator); + } } }; @@ -1350,13 +1352,10 @@ const LoadSnapshotOptions = struct { fn loadSnapshot( allocator: Allocator, - logger_: Logger, + unscoped_logger: Logger, options: LoadSnapshotOptions, -) !*LoadedSnapshot { - const logger = logger_.withScope(@typeName(@This())); - const result = try allocator.create(LoadedSnapshot); - errdefer allocator.destroy(result); - result.allocator = allocator; +) !LoadedSnapshot { + const logger = unscoped_logger.withScope(@typeName(@This()) ++ "." 
++ @src().fn_name); const genesis_file_path = try config.current.genesisFilePath() orelse return error.GenesisPathNotProvided; @@ -1365,14 +1364,14 @@ fn loadSnapshot( var snapshot_dir = try std.fs.cwd().makeOpenPath(snapshot_dir_str, .{ .iterate = true }); defer snapshot_dir.close(); - var all_snapshot_fields, const snapshot_files = try getOrDownloadSnapshots(allocator, logger.unscoped(), options.gossip_service, .{ + const combined_manifest, const snapshot_files = try getOrDownloadSnapshots(allocator, logger.unscoped(), options.gossip_service, .{ .snapshot_dir = snapshot_dir, .force_unpack_snapshot = config.current.accounts_db.force_unpack_snapshot, .force_new_snapshot_download = config.current.accounts_db.force_new_snapshot_download, .num_threads_snapshot_unpack = config.current.accounts_db.num_threads_snapshot_unpack, .min_snapshot_download_speed_mbs = config.current.accounts_db.min_snapshot_download_speed_mbs, }); - result.snapshot_fields = all_snapshot_fields; + errdefer combined_manifest.deinit(allocator); logger.info().logf("full snapshot: {s}", .{ sig.utils.fmt.tryRealPath(snapshot_dir, snapshot_files.full.snapshotArchiveName().constSlice()), @@ -1395,7 +1394,7 @@ fn loadSnapshot( }; logger.info().logf("n_threads_snapshot_load: {d}", .{n_threads_snapshot_load}); - result.accounts_db = try AccountsDB.init(.{ + var accounts_db = try AccountsDB.init(.{ .allocator = allocator, .logger = logger.unscoped(), .snapshot_dir = snapshot_dir, @@ -1405,45 +1404,51 @@ fn loadSnapshot( .number_of_index_shards = config.current.accounts_db.number_of_index_shards, .lru_size = 10_000, }); - errdefer result.accounts_db.deinit(); + errdefer accounts_db.deinit(); - if (options.metadata_only) { - result.collapsed_snapshot_fields = try result.snapshot_fields.collapse(allocator); - } else { - result.collapsed_snapshot_fields = try result.accounts_db.loadWithDefaults( + const collapsed_manifest = if (options.metadata_only) + try combined_manifest.collapse(allocator) + else + try accounts_db.loadWithDefaults( allocator, - &all_snapshot_fields, + combined_manifest, n_threads_snapshot_load, options.validate_snapshot, config.current.accounts_db.accounts_per_file_estimate, config.current.accounts_db.fastload, config.current.accounts_db.save_index, ); - } - errdefer result.collapsed_snapshot_fields.deinit(allocator); - - const bank_fields = &result.collapsed_snapshot_fields.bank_fields; + errdefer collapsed_manifest.deinit(allocator); // this should exist before we start to unpack logger.info().log("reading genesis..."); - result.genesis_config = GenesisConfig.init(allocator, genesis_file_path) catch |err| { + + const genesis_config = GenesisConfig.init(allocator, genesis_file_path) catch |err| { if (err == error.FileNotFound) { logger.err().logf("genesis config not found - expecting {s} to exist", .{genesis_file_path}); } return err; }; - errdefer result.genesis_config.deinit(allocator); + errdefer genesis_config.deinit(allocator); logger.info().log("validating bank..."); - result.bank = Bank.init(&result.accounts_db, bank_fields); - try Bank.validateBankFields(result.bank.bank_fields, &result.genesis_config); + + try Bank.validateBankFields(&collapsed_manifest.bank_fields, &genesis_config); if (options.metadata_only) { - return result; + logger.info().log("accounts-db setup done..."); + return .{ + .allocator = allocator, + .accounts_db = accounts_db, + .combined_manifest = combined_manifest, + .collapsed_manifest = collapsed_manifest, + .genesis_config = genesis_config, + .status_cache = null, + }; } // 
validate the status cache - result.status_cache = StatusCache.initFromDir(allocator, snapshot_dir) catch |err| { + const status_cache = StatusCache.initFromDir(allocator, snapshot_dir) catch |err| { if (err == error.FileNotFound) { logger.err().logf( "status_cache not found - expecting {s}/snapshots/status_cache to exist", @@ -1452,15 +1457,23 @@ fn loadSnapshot( } return err; }; - errdefer result.status_cache.deinit(allocator); + errdefer status_cache.deinit(allocator); - var slot_history = try result.accounts_db.getSlotHistory(allocator); + const slot_history = try accounts_db.getSlotHistory(allocator); defer slot_history.deinit(allocator); - try result.status_cache.validate(allocator, bank_fields.slot, &slot_history); + + try status_cache.validate(allocator, collapsed_manifest.bank_fields.slot, &slot_history); logger.info().log("accounts-db setup done..."); - return result; + return .{ + .allocator = allocator, + .accounts_db = accounts_db, + .combined_manifest = combined_manifest, + .collapsed_manifest = collapsed_manifest, + .genesis_config = genesis_config, + .status_cache = status_cache, + }; } /// entrypoint to download snapshot diff --git a/src/rpc/server.zig b/src/rpc/server.zig index 287a5dc9a..c320e3900 100644 --- a/src/rpc/server.zig +++ b/src/rpc/server.zig @@ -436,15 +436,6 @@ test Server { try unpack(allocator, logger, inc_snap_file, snap_dir, 1, false); } - const FullAndIncrementalManifest = sig.accounts_db.snapshots.FullAndIncrementalManifest; - var all_snap_fields = try FullAndIncrementalManifest.fromFiles( - allocator, - logger, - snap_dir, - snap_files, - ); - defer all_snap_fields.deinit(allocator); - var accountsdb = try sig.accounts_db.AccountsDB.init(.{ .allocator = allocator, .logger = logger, @@ -456,7 +447,27 @@ test Server { .lru_size = null, }); defer accountsdb.deinit(); - _ = try accountsdb.loadWithDefaults(allocator, &all_snap_fields, 1, true, 300, false, false); + + { + const FullAndIncrementalManifest = sig.accounts_db.snapshots.FullAndIncrementalManifest; + const all_snap_fields = try FullAndIncrementalManifest.fromFiles( + allocator, + logger, + snap_dir, + snap_files, + ); + defer all_snap_fields.deinit(allocator); + + (try accountsdb.loadWithDefaults( + allocator, + all_snap_fields, + 1, + true, + 300, + false, + false, + )).deinit(allocator); + } var thread_pool = sig.sync.ThreadPool.init(.{ .max_threads = 1 }); defer {