Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(ledger): fuzz testing #452

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions build.zig
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,11 @@ pub fn build(b: *Build) void {
fuzz_exe.root_module.addImport("zig-network", zig_network_module);
fuzz_exe.root_module.addImport("httpz", httpz_mod);
fuzz_exe.root_module.addImport("zstd", zstd_mod);
fuzz_exe.root_module.addOptions("build-options", build_options);
switch (blockstore_db) {
.rocksdb => fuzz_exe.root_module.addImport("rocksdb", rocksdb_mod),
.hashmap => {},
}
fuzz_exe.linkLibC();

const fuzz_exe_run = b.addRunArtifact(fuzz_exe);
Expand Down
3 changes: 3 additions & 0 deletions src/fuzz.zig
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ const accountsdb_fuzz = sig.accounts_db.fuzz;
const gossip_fuzz_service = sig.gossip.fuzz_service;
const gossip_fuzz_table = sig.gossip.fuzz_table;
const accountsdb_snapshot_fuzz = sig.accounts_db.fuzz_snapshot;
const ledger_rocksdb_fuzz = sig.ledger.fuzz_rocksdb;
const StandardErrLogger = sig.trace.ChannelPrintLogger;
const Level = sig.trace.Level;

Expand All @@ -22,6 +23,7 @@ pub const FuzzFilter = enum {
gossip_service,
gossip_table,
allocators,
ledger_rocksdb,
};

pub fn main() !void {
Expand Down Expand Up @@ -86,6 +88,7 @@ pub fn main() !void {
.snapshot => try accountsdb_snapshot_fuzz.run(&cli_args),
.gossip_service => try gossip_fuzz_service.run(seed, &cli_args),
.gossip_table => try gossip_fuzz_table.run(seed, &cli_args),
.ledger_rocksdb => try ledger_rocksdb_fuzz.run(seed, &cli_args),
.allocators => try sig.utils.allocators.runFuzzer(seed, &cli_args),
}
}
Expand Down
324 changes: 324 additions & 0 deletions src/ledger/fuzz.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,324 @@
const std = @import("std");
const sig = @import("../sig.zig");

const ColumnFamily = sig.ledger.database.ColumnFamily;
const AtomicU64 = std.atomic.Value(u64);

// Total number of DB actions performed, shared by every worker thread below.
// Each worker stops once this reaches the optional max passed on the CLI.
var total_action_count: AtomicU64 = AtomicU64.init(0);

// The fuzzer links libc (see build.zig: fuzz_exe.linkLibC()), so the C heap
// allocator is available and used for everything here.
const allocator = std.heap.c_allocator;

// Value type stored in the single fuzzed column family.
const Data = struct {
    value: []const u8,
};

// The one column family all fuzz actions operate on: u64 keys -> Data values.
const cf1 = ColumnFamily{
    .name = "data",
    .Key = u64,
    .Value = Data,
};
const RocksDb = sig.ledger.database.RocksDB(&.{cf1});

/// Entry point for the RocksDB ledger fuzzer.
///
/// Recreates a scratch RocksDB under FUZZ_DATA_DIR, then spawns one thread per
/// database operation (put / delete / delete-files-in-range / getBytes / get /
/// count / contains, plus the write-batch API). All threads hammer the same DB
/// until `total_action_count` reaches the optional max given as the first CLI
/// argument (run forever when absent).
pub fn run(seed: u64, args: *std.process.ArgIterator) !void {
    // Optional first CLI arg: maximum total number of actions before stopping.
    const maybe_max_actions_string = args.next();
    const maybe_max_actions = blk: {
        if (maybe_max_actions_string) |max_actions_str| {
            break :blk try std.fmt.parseInt(usize, max_actions_str, 10);
        } else {
            break :blk null;
        }
    };
    // NOTE(review): bumps the shared counter once more when `run` exits;
    // the purpose is unclear — confirm whether this is intentional.
    defer {
        _ = total_action_count.fetchAdd(1, .monotonic);
    }

    // NOTE: change to trace for full logs
    var std_logger = sig.trace.DirectPrintLogger.init(
        allocator,
        .debug,
    );
    const logger = std_logger.logger();

    // NOTE(review): this single PRNG is handed (by pointer) to every worker
    // thread below with no synchronization; std PRNGs are not thread-safe, so
    // concurrent draws race on the generator state. Acceptable for a fuzzer's
    // entropy, but confirm this is deliberate (or give each thread its own
    // PRNG seeded from `seed`).
    var prng = std.rand.DefaultPrng.init(seed);
    const random = prng.random();

    const rocksdb_path =
        try std.fmt.allocPrint(allocator, "{s}/ledger/rocksdb", .{sig.FUZZ_DATA_DIR});

    // ensure we start with a clean slate.
    if (std.fs.cwd().access(rocksdb_path, .{})) |_| {
        try std.fs.cwd().deleteTree(rocksdb_path);
    } else |_| {}
    try std.fs.cwd().makePath(rocksdb_path);

    var db: RocksDb = try RocksDb.open(
        allocator,
        logger,
        rocksdb_path,
    );

    // Runs after the inner block's deferred join()s, so the DB is only closed
    // once every worker thread has finished.
    defer db.deinit();

    {
        var db_put_thread = try std.Thread.spawn(
            .{},
            dbPut,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_put_thread.join();

        var db_delete_thread = try std.Thread.spawn(
            .{},
            dbDelete,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_delete_thread.join();

        var db_delete_files_in_range = try std.Thread.spawn(
            .{},
            dbDeleteFilesInRange,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_delete_files_in_range.join();

        var db_get_bytes_thread = try std.Thread.spawn(
            .{},
            dbGetBytes,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_get_bytes_thread.join();

        var db_get_thread = try std.Thread.spawn(
            .{},
            dbGet,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_get_thread.join();

        var db_count_thread = try std.Thread.spawn(
            .{},
            dbCount,
            .{ &db, &total_action_count, maybe_max_actions },
        );
        defer db_count_thread.join();

        var db_contains_thread = try std.Thread.spawn(
            .{},
            dbContains,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer db_contains_thread.join();

        // Batch API
        var batch_delete_range_thread = try std.Thread.spawn(
            .{},
            batchDeleteRange,
            .{ &db, &random, &total_action_count, maybe_max_actions },
        );
        defer batch_delete_range_thread.join();
    }
}

/// Driver loop shared by the single-operation worker threads.
///
/// Repeatedly invokes `func(args...)` until the shared `count` reaches
/// `max_actions` (forever when `max_actions` is null), incrementing `count`
/// once per invocation and printing a progress line roughly every 1000
/// actions. Any error from `func` aborts the loop and propagates.
fn performDbAction(
    action_name: []const u8,
    comptime func: anytype,
    args: anytype,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    var reported_at: u64 = 0;

    while (true) {
        // Stop once the shared counter hits the configured ceiling.
        if (max_actions) |limit| {
            if (count.load(.monotonic) >= limit) {
                std.debug.print("{s} reached max actions: {}\n", .{ action_name, limit });
                return;
            }
        }

        _ = try @call(.auto, func, args);

        const seen = count.load(.monotonic);
        if (seen - reported_at >= 1_000) {
            std.debug.print("{d} {s} actions\n", .{ seen, action_name });
            reported_at = seen;
        }

        _ = count.fetchAdd(1, .monotonic);
    }
}

/// Worker thread: repeatedly writes a random key/value pair via RocksDb.put.
///
/// NOTE(review): the key and value are drawn once, so every iteration of the
/// driver loop re-puts the exact same entry; regenerating per iteration would
/// give better coverage — confirm intent.
fn dbPut(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const key = random.int(u32);
    var buffer: [61]u8 = undefined;
    // Fill the value with random bytes using the std helper.
    random.bytes(buffer[0..]);
    const value: []const u8 = buffer[0..];
    try performDbAction(
        "RocksDb.put",
        RocksDb.put,
        // `+%` (wrapping add): plain `key + 1` trips the overflow safety check
        // in Debug/ReleaseSafe when the random key is maxInt(u32).
        .{ db, cf1, (key +% 1), Data{ .value = value } },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly deletes one randomly chosen key.
fn dbDelete(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const target_key = random.int(u32);
    return performDbAction(
        "RocksDb.delete",
        RocksDb.delete,
        .{ db, cf1, target_key },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly deletes SST files covering a random key range.
fn dbDeleteFilesInRange(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const lo = random.int(u32);
    const raw_hi = random.int(u32);
    // Keep the range ordered: when the second draw lands below `lo`, shift it
    // up by `lo` (saturating) so the resulting bound is >= `lo`.
    const hi = if (raw_hi < lo) raw_hi +| lo else raw_hi;

    return performDbAction(
        "RocksDb.deleteFilesInRange",
        RocksDb.deleteFilesInRange,
        .{ db, cf1, lo, hi },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly fetches the raw bytes stored at one random key.
///
/// NOTE(review): the driver loop discards getBytes' return value; if it
/// returns caller-owned bytes this leaks each iteration — verify ownership.
fn dbGetBytes(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const lookup_key = random.int(u32);
    return performDbAction(
        "RocksDb.getBytes",
        RocksDb.getBytes,
        .{ db, cf1, lookup_key },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly reads and deserializes the value at one random key.
///
/// NOTE(review): the driver loop discards the returned value; if `get`
/// allocates with the passed allocator this leaks each iteration — verify.
fn dbGet(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const lookup_key = random.int(u32);
    return performDbAction(
        "RocksDb.get",
        RocksDb.get,
        .{ db, allocator, cf1, lookup_key },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly counts the entries in the column family.
fn dbCount(
    db: *RocksDb,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    return performDbAction(
        "RocksDb.count",
        RocksDb.count,
        .{ db, cf1 },
        count,
        max_actions,
    );
}

/// Worker thread: repeatedly checks whether one random key is present.
fn dbContains(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    const probe_key = random.int(u32);
    return performDbAction(
        "RocksDb.contains",
        RocksDb.contains,
        .{ db, cf1, probe_key },
        count,
        max_actions,
    );
}

// Batch API
/// Worker thread exercising the write-batch API: each iteration puts a random
/// entry, deletes a random key range, deletes the entry's key, then commits
/// the whole batch. Loops until the shared `count` reaches `max_actions`
/// (forever when null), printing progress roughly every 1000 actions.
fn batchDeleteRange(
    db: *RocksDb,
    random: *const std.rand.Random,
    count: *std.atomic.Value(u64),
    max_actions: ?usize,
) !void {
    var last_print_msg_count: u64 = 0;
    while (true) {
        // Stop once the shared counter hits the configured ceiling.
        if (max_actions) |max| {
            if (count.load(.monotonic) >= max) {
                std.debug.print("Batch actions reached max actions: {}\n", .{max});
                break;
            }
        }

        // Random ordered range: a saturating shift keeps end >= start.
        const start = random.int(u32);
        const end = blk: {
            const end_ = random.int(u32);
            if (end_ < start)
                break :blk (end_ +| start)
            else
                break :blk end_;
        };

        const key = random.int(u32);
        var buffer: [61]u8 = undefined;

        // Fill the value with random bytes using the std helper rather than a
        // manual byte-by-byte loop with a redundant @intCast.
        random.bytes(buffer[0..]);

        const value: []const u8 = buffer[0..];

        var batch = try db.initWriteBatch();
        defer batch.deinit();

        try batch.put(cf1, key, Data{ .value = value });
        try batch.deleteRange(cf1, start, end);
        try batch.delete(cf1, key);
        try db.commit(&batch);

        const current_count = count.load(.monotonic);
        if ((current_count - last_print_msg_count) >= 1_000) {
            std.debug.print("{d} Batch actions\n", .{current_count});
            last_print_msg_count = current_count;
        }

        _ = count.fetchAdd(1, .monotonic);
    }
}
1 change: 1 addition & 0 deletions src/ledger/lib.zig
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ pub const shred = @import("shred.zig");
pub const shredder = @import("shredder.zig");
pub const transaction_status = @import("transaction_status.zig");
pub const tests = @import("tests.zig");
pub const fuzz_rocksdb = @import("fuzz.zig");

pub const BlockstoreDB = blockstore.BlockstoreDB;
pub const ShredInserter = shred_inserter.ShredInserter;
Expand Down
Loading