Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Store computed metrics in BenchmarkResult and move report/pretty-print functionality into BenchmarkResult #38

Merged
merged 4 commits into from
Feb 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions examples/basic.zig
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,5 @@ test "bench test basic" {
};
defer benchmarkResults.results.deinit();
try zbench.run(myBenchmark, &bench, &benchmarkResults);
try benchmarkResults.prettyPrint();
hendriknielaender marked this conversation as resolved.
Show resolved Hide resolved
}
1 change: 1 addition & 0 deletions examples/bubble_sort.zig
Original file line number Diff line number Diff line change
Expand Up @@ -28,4 +28,5 @@ test "bench test bubbleSort" {
};
defer benchmarkResults.results.deinit();
try zbench.run(myBenchmark, &bench, &benchmarkResults);
try benchmarkResults.prettyPrint();
}
1 change: 1 addition & 0 deletions examples/sleep.zig
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,5 @@ test "bench test sleepy" {
};
defer benchmarkResults.results.deinit();
try zbench.run(sleepBenchmark, &bench, &benchmarkResults);
try benchmarkResults.prettyPrint();
}
7 changes: 2 additions & 5 deletions util/color.zig
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ pub const Color = enum {
magenta,
cyan,
reset,
none,

// Return the ANSI escape code for this color.
pub fn code(self: Color) []const u8 {
Expand All @@ -19,11 +20,7 @@ pub const Color = enum {
.magenta => "\x1b[35m",
.cyan => "\x1b[36m",
.reset => "\x1b[0m",
.none => "",
};
}
};

/// Prints `text` to stdout wrapped in the ANSI escape code for `color`,
/// then resets the terminal color.
/// color: The color to render `text` in.
/// text: The bytes to print verbatim between the color codes.
pub fn colorPrint(color: Color, text: []const u8) !void {
    const stdout = std.io.getStdOut().writer();
    // "{s}" is required to format []const u8 as a string; "{}" does not
    // accept byte slices (and the rest of this codebase uses "{s}" throughout).
    try stdout.print("{s}{s}{s}", .{ color.code(), text, Color.reset.code() });
}
64 changes: 64 additions & 0 deletions util/format.zig
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
const std = @import("std");
const Color = @import("./color.zig").Color;

pub fn duration(buffer: []u8, d: u64) ![]u8 {
const units = [_][]const u8{ "ns", "µs", "ms", "s" };
Expand All @@ -18,3 +19,66 @@ pub fn duration(buffer: []u8, d: u64) ![]u8 {

return formatted;
}

/// Pretty-prints the benchmark's name, left-padded to the table's name column width.
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintName(name: []const u8, writer: anytype, color: Color) !void {
    const start_code = color.code();
    const reset_code = Color.reset.code();
    try writer.print("{s}{s:<22}{s} ", .{ start_code, name, reset_code });
}

/// Pretty-prints the total number of operations (runs) performed by the benchmark.
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintTotalOperations(total_operations: u64, writer: anytype, color: Color) !void {
    const start_code = color.code();
    const reset_code = Color.reset.code();
    try writer.print("{s}{d:<8}{s} ", .{ start_code, total_operations, reset_code });
}

/// Pretty-prints the total time it took to perform all the runs combined.
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintTotalTime(total_time: u64, writer: anytype, color: Color) !void {
    // Render the nanosecond count as a human-readable duration string first.
    var buf: [128]u8 = undefined;
    const time_str = try duration(&buf, total_time);
    try writer.print("{s}{s:<14}{s} ", .{ color.code(), time_str, Color.reset.code() });
}

/// Pretty-prints the average (arithmetic mean) and the standard deviation of the durations
/// as "<avg> ± <std>", left-padded to the table's column width.
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintAvgStd(avg: u64, stdd: u64, writer: anytype, color: Color) !void {
    var avg_buffer: [128]u8 = undefined;
    const avg_str = try duration(avg_buffer[0..], avg);

    var stdd_buffer: [128]u8 = undefined;
    const stdd_str = try duration(stdd_buffer[0..], stdd);

    // Combine the two pieces with a single bufPrint. This mirrors the structure of
    // prettyPrintMinMax and avoids the manual offset bookkeeping over one shared
    // buffer, which is easy to get wrong; the produced bytes are identical.
    var buffer: [128]u8 = undefined;
    const str = try std.fmt.bufPrint(buffer[0..], "{s} ± {s}", .{ avg_str, stdd_str });

    try writer.print("{s}{s:<22}{s} ", .{ color.code(), str, Color.reset.code() });
}

/// Pretty-prints the minimum and maximum recorded duration as "(min ... max)".
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintMinMax(min: u64, max: u64, writer: anytype, color: Color) !void {
    var min_buf: [128]u8 = undefined;
    var max_buf: [128]u8 = undefined;
    var line_buf: [128]u8 = undefined;

    const min_text = try duration(&min_buf, min);
    const max_text = try duration(&max_buf, max);
    const range_text = try std.fmt.bufPrint(&line_buf, "({s} ... {s})", .{ min_text, max_text });

    try writer.print("{s}{s:<28}{s} ", .{ color.code(), range_text, Color.reset.code() });
}

/// Pretty-prints the 75th, 99th and 99.5th percentile of the durations, one column each.
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintPercentiles(p75: u64, p99: u64, p995: u64, writer: anytype, color: Color) !void {
    // Format all three percentiles through the same loop instead of three
    // copy-pasted buffer/format pairs; each value gets its own backing buffer.
    const values = [3]u64{ p75, p99, p995 };
    var buffers: [3][128]u8 = undefined;
    var texts: [3][]u8 = undefined;

    for (values, 0..) |value, i| {
        texts[i] = try duration(buffers[i][0..], value);
    }

    try writer.print("{s}{s:<10} {s:<10} {s:<10}{s} ", .{ color.code(), texts[0], texts[1], texts[2], Color.reset.code() });
}
147 changes: 105 additions & 42 deletions zbench.zig
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,14 @@ const format = @import("./util/format.zig");
/// Benchmark is a type representing a single benchmark session.
/// It provides metrics and utilities for performance measurement.
pub const Benchmark = struct {
/// Used to represent the 75th, 99th and 99.5th percentiles of the recorded durations,
/// generated by `Benchmark.calculatePercentiles`.
pub const Percentiles = struct {
    // 75th percentile of the recorded durations, in the same unit as the durations (u64 ticks/ns).
    p75: u64,
    // 99th percentile of the recorded durations.
    p99: u64,
    // 99.5th percentile of the recorded durations.
    p995: u64,
};

/// Name of the benchmark.
name: []const u8,
/// Number of iterations to be performed in the benchmark.
Expand Down Expand Up @@ -83,12 +91,6 @@ pub const Benchmark = struct {
self.total_operations = ops;
}

pub const Percentiles = struct {
p75: u64,
p99: u64,
p995: u64,
};

pub fn quickSort(items: []u64, low: usize, high: usize) void {
if (low < high) {
const pivotIndex = partition(items, low, high);
Expand All @@ -114,7 +116,9 @@ pub const Benchmark = struct {
return i;
}

/// Calculate the p75, p99, and p995 durations
/// Calculate the 75th, 99th and 99.5th percentiles of the durations. They represent the timings below
/// which 75%, 99% and 99.5% of the other measurements would lie (respectively) when timings are
/// sorted in increasing order.
pub fn calculatePercentiles(self: Benchmark) Percentiles {
// quickSort might fail with an empty input slice, so safety checks first
const len = self.durations.items.len;
Expand All @@ -139,9 +143,13 @@ pub const Benchmark = struct {
}

/// Prints a report of total operations and timing statistics.
/// (Similar to BenchmarkResult.prettyPrint)
pub fn report(self: Benchmark) !void {
const percentiles = self.calculatePercentiles();

var total_time_buffer: [128]u8 = undefined;
const total_time_str = try format.duration(total_time_buffer[0..], self.elapsed());

var p75_buffer: [128]u8 = undefined;
const p75_str = try format.duration(p75_buffer[0..], percentiles.p75);

Expand All @@ -167,19 +175,16 @@ pub const Benchmark = struct {
const min_max_str = try std.fmt.bufPrint(min_max_buffer[0..], "({s} ... {s})", .{ min_str, max_str });

const stdout = std.io.getStdOut().writer();
try stdout.print(
"\n{s:<22} {s:<8} {s:<22} {s:<28} {s:<10} {s:<10} {s:<10}\n",
.{ "benchmark", "runs", "time (avg ± σ)", "(min ... max)", "p75", "p99", "p995" },
);
prettyPrintHeader();
try stdout.print("---------------------------------------------------------------------------------------------------------------\n", .{});
try stdout.print(
"{s:<22} \x1b[90m{d:<8} \x1b[33m{s:<22} \x1b[95m{s:<28} \x1b[90m{s:<10} {s:<10} {s:<10}\x1b[0m\n",
.{ self.name, self.total_operations, avg_std_str, min_max_str, p75_str, p99_str, p995_str },
"{s:<22} \x1b[90m{d:<8} \x1b[90m{s:<10} \x1b[33m{s:<22} \x1b[95m{s:<28} \x1b[90m{s:<10} {s:<10} {s:<10}\x1b[0m\n\n",
.{ self.name, self.total_operations, total_time_str, avg_std_str, min_max_str, p75_str, p99_str, p995_str },
);
try stdout.print("\n", .{});
}

/// Calculate the average duration
/// Calculate the average (more precisely arithmetic mean) of the durations
pub fn calculateAverage(self: Benchmark) u64 {
// prevent division by zero
const len = self.durations.items.len;
Expand All @@ -195,7 +200,8 @@ pub const Benchmark = struct {
return avg;
}

/// Calculate the standard deviation of the durations
/// Calculate the standard deviation of the durations. An estimate for the average *deviation*
/// from the average duration.
pub fn calculateStd(self: Benchmark) u64 {
if (self.durations.items.len <= 1) return 0;

Expand All @@ -210,7 +216,7 @@ pub const Benchmark = struct {
nvar += @bitCast((d - a) * (d - a));
}

// We are using the non-biased estimator for the variance; sum(X - μ)^2 / (n - 1)
// We are using the non-biased estimator for the variance; sum((Xi - μ)^2) / (n - 1)
return std.math.sqrt(nvar / (self.durations.items.len - 1));
}
};
Expand All @@ -219,46 +225,99 @@ pub const Benchmark = struct {
/// It takes a pointer to a Benchmark object.
pub const BenchFunc = fn (*Benchmark) void;

/// BenchmarkResult stores the result of a single benchmark.
/// It includes the name and the total duration of the benchmark.
/// BenchmarkResult stores the resulting computed metrics/statistics from a benchmark
pub const BenchmarkResult = struct {
    const Self = @This();
    const Color = c.Color;

    /// Name of the benchmark
    name: []const u8,
    /// 75th, 99th and 99.5th percentiles of the recorded durations. They represent the timings below
    /// which 75%, 99% and 99.5% of the other measurements would lie, respectively, when timings
    /// are sorted in increasing order.
    percentiles: Benchmark.Percentiles,
    /// The average (more precisely arithmetic mean) of the recorded durations
    avg_duration: usize,
    /// The standard-deviation of the recorded durations (an estimate for the average *deviation* from
    /// the average duration).
    std_duration: usize,
    /// The minimum among the recorded durations
    min_duration: usize,
    /// The maximum among the recorded durations
    max_duration: usize,
    /// The total amount of operations (or runs) performed of the benchmark
    total_operations: usize,
    /// Total time for all the operations (or runs) of the benchmark combined
    total_time: usize,

    /// Formats and prints the benchmark-result in a readable format.
    /// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
    /// header: Whether to pretty-print the header or not
    pub fn prettyPrint(self: Self, writer: anytype, header: bool) !void {
        if (header) try prettyPrintHeader(writer);

        try format.prettyPrintName(self.name, writer, Color.none);
        try format.prettyPrintTotalOperations(self.total_operations, writer, Color.cyan);
        try format.prettyPrintTotalTime(self.total_time, writer, Color.cyan);
        // Field names are singular (avg_duration, std_duration, min_duration,
        // max_duration); referencing plural names here would not compile.
        try format.prettyPrintAvgStd(self.avg_duration, self.std_duration, writer, Color.green);
        try format.prettyPrintMinMax(self.min_duration, self.max_duration, writer, Color.blue);
        try format.prettyPrintPercentiles(self.percentiles.p75, self.percentiles.p99, self.percentiles.p995, writer, Color.cyan);

        _ = try writer.write("\n");
    }
};

/// Pretty-prints the header for the result pretty-print table
/// writer: Type that has the associated method print (for example std.io.getStdOut.writer())
pub fn prettyPrintHeader(writer: anytype) !void {
    // Column widths (22/8/14/22/28/10/10/10) must stay in sync with the widths
    // used by the per-column pretty-print helpers so the header lines up with rows.
    try writer.print(
        "\n{s:<22} {s:<8} {s:<14} {s:<22} {s:<28} {s:<10} {s:<10} {s:<10}\n",
        .{ "benchmark", "runs", "total time", "time/run (avg ± σ)", "(min ... max)", "p75", "p99", "p995" },
    );
    // Separator line spanning the full table width.
    try writer.print("-----------------------------------------------------------------------------------------------------------------------------\n", .{});
}

/// BenchmarkResults acts as a container for multiple benchmark results.
/// It provides functionality to format and print these results.
pub const BenchmarkResults = struct {
    const Color = c.Color;

    /// A dynamic list of BenchmarkResult objects.
    results: std.ArrayList(BenchmarkResult),
    /// A handle to a buffered stdout-writer. Used for printing-operations
    out_stream: std.io.BufferedWriter(1024, @TypeOf(std.io.getStdOut().writer())) = .{ .unbuffered_writer = std.io.getStdOut().writer() },

    /// Determines the color representation based on the total-time of the benchmark.
    /// total_time: The total-time to evaluate.
    pub fn getColor(self: *const BenchmarkResults, total_time: u64) Color {
        // Bounds are taken from the first and last stored result; callers must
        // ensure results is non-empty before asking for a color.
        const max_total_time = @max(self.results.items[0].total_time, self.results.items[self.results.items.len - 1].total_time);
        const min_total_time = @min(self.results.items[0].total_time, self.results.items[self.results.items.len - 1].total_time);

        if (total_time <= min_total_time) return Color.green;
        if (total_time >= max_total_time) return Color.red;

        // Map total_time linearly into [0, 100); the +1 prevents division by
        // zero when min and max coincide.
        const prop = (total_time - min_total_time) * 100 / (max_total_time - min_total_time + 1);

        if (prop < 50) return Color.green;
        if (prop < 75) return Color.yellow;

        return Color.red;
    }

    /// Formats and prints all benchmark results as a readable table, then flushes
    /// the buffered writer so the output actually reaches stdout.
    pub fn prettyPrint(self: *BenchmarkResults) !void {
        var writer = self.out_stream.writer();
        try prettyPrintHeader(writer);
        for (self.results.items) |result| {
            try format.prettyPrintName(result.name, writer, Color.none);
            try format.prettyPrintTotalOperations(result.total_operations, writer, Color.cyan);
            try format.prettyPrintTotalTime(result.total_time, writer, Color.cyan);
            try format.prettyPrintAvgStd(result.avg_duration, result.std_duration, writer, Color.green);
            try format.prettyPrintMinMax(result.min_duration, result.max_duration, writer, Color.blue);
            try format.prettyPrintPercentiles(result.percentiles.p75, result.percentiles.p99, result.percentiles.p995, writer, Color.cyan);
            // Terminate each result row; without this every row would be
            // printed on the same line, since none of the column helpers
            // emit a newline themselves.
            _ = try writer.write("\n");
        }

        try self.out_stream.flush();
    }
};

Expand Down Expand Up @@ -316,13 +375,17 @@ pub fn run(comptime func: BenchFunc, bench: *Benchmark, benchResult: *BenchmarkR
bench.stop();
}

bench.setTotalOperations(bench.N);

const elapsed = bench.elapsed();
try benchResult.results.append(BenchmarkResult{
.name = bench.name,
.duration = elapsed,
.percentiles = bench.calculatePercentiles(),
.avg_duration = bench.calculateAverage(),
.std_duration = bench.calculateStd(),
.min_duration = bench.min_duration,
.max_duration = bench.max_duration,
.total_time = elapsed,
.total_operations = bench.total_operations,
});

bench.setTotalOperations(bench.N);

try bench.report();
}
Loading