Refactor and improve/tweak printing logic and doc-comments
- Split BenchmarkResult.prettyPrint into individual functions, one per
  metric to be pretty-printed (a usage sketch follows this list)

- Propagate the choice of writer through the pretty-print functions within
  BenchmarkResult

- Add a buffered stdout writer as a field of BenchmarkResults. We
  use this for all formatted printing operations within BenchmarkResults
  (illustrated in the sketch after the diff).

- Make use of BenchmarkResults.getColor to color-code the total-time metric
  in BenchmarkResults.prettyPrint

- Add various doc-comments
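
A minimal usage sketch of the split-out per-metric API (the import path, the
choice of writer, and the already-populated `result` value are assumptions for
illustration, not part of this commit):

const std = @import("std");
const zbench = @import("zbench.zig");

// Sketch only: `result` stands for an already-computed BenchmarkResult.
fn printOneResult(result: zbench.BenchmarkResult) !void {
    const writer = std.io.getStdOut().writer();

    // Print the whole row at once, with the header ...
    try result.prettyPrint(writer, true);

    // ... or compose the per-metric functions, picking a color per column.
    try result.prettyPrintName(writer, .none);
    try result.prettyPrintTotalTime(writer, .cyan);
    try result.prettyPrintAvgStd(writer, .green);
    _ = try writer.write("\n");
}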
Bryysen committed Jan 17, 2024
1 parent 3069ad4 commit 3313c40
Showing 2 changed files with 117 additions and 56 deletions.
7 changes: 2 additions & 5 deletions util/color.zig
@@ -8,6 +8,7 @@ pub const Color = enum {
magenta,
cyan,
reset,
none,

// Return the ANSI escape code for this color.
pub fn code(self: Color) []const u8 {
@@ -19,11 +20,7 @@ pub const Color = enum {
.magenta => "\x1b[35m",
.cyan => "\x1b[36m",
.reset => "\x1b[0m",
.none => "",
};
}
};

pub fn colorPrint(color: Color, text: []const u8) !void {
const stdout = std.io.getStdOut().writer();
try stdout.print("{}{}{}", .{ color.code(), text, Color.reset.code() });
}
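
A minimal sketch of how Color.code() is used after this change (the import
path is an assumption); the new none variant returns an empty string, so the
wrapped text prints uncolored:

const std = @import("std");
const Color = @import("util/color.zig").Color;

pub fn main() !void {
    const stdout = std.io.getStdOut().writer();
    // Cyan label, then reset back to the terminal's default color.
    try stdout.print("{s}cyan label{s}\n", .{ Color.cyan.code(), Color.reset.code() });
    // `none` emits no escape code at all, leaving the text uncolored.
    try stdout.print("{s}plain label{s}\n", .{ Color.none.code(), Color.reset.code() });
}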
166 changes: 115 additions & 51 deletions zbench.zig
@@ -11,6 +11,14 @@ const format = @import("./util/format.zig");
/// Benchmark is a type representing a single benchmark session.
/// It provides metrics and utilities for performance measurement.
pub const Benchmark = struct {
/// Used to represent the 75th, 99th and 99.5th percentiles of the durations,
/// generated by `Benchmark.calculatePercentiles`.
pub const Percentiles = struct {
p75: u64,
p99: u64,
p995: u64,
};

/// Name of the benchmark.
name: []const u8,
/// Number of iterations to be performed in the benchmark.
@@ -108,7 +116,9 @@ pub const Benchmark = struct {
return i;
}

/// Calculate the p75, p99, and p995 durations
/// Calculate the 75th, 99th and 99.5th percentiles of the durations. They represent the timings below
/// which 75%, 99% and 99.5% of the other measurements would lie (respectively) when timings are
/// sorted in increasing order.
pub fn calculatePercentiles(self: Benchmark) Percentiles {
// quickSort might fail with an empty input slice, so safety checks first
const len = self.durations.items.len;
@@ -174,7 +184,7 @@ pub const Benchmark = struct {
try stdout.print("\n", .{});
}

/// Calculate the average duration
/// Calculate the average (more precisely, the arithmetic mean) of the durations
pub fn calculateAverage(self: Benchmark) u64 {
// prevent division by zero
const len = self.durations.items.len;
@@ -190,7 +200,8 @@ pub const Benchmark = struct {
return avg;
}

/// Calculate the standard deviation of the durations
/// Calculate the standard deviation of the durations. An estimate for the average *deviation*
/// from the average duration.
pub fn calculateStd(self: Benchmark) u64 {
if (self.durations.items.len <= 1) return 0;

@@ -216,98 +227,151 @@ pub const BenchFunc = fn (*Benchmark) void;

/// BenchmarkResult stores the resulting computed metrics/statistics from a benchmark
pub const BenchmarkResult = struct {
const Self = @This();
const Color = c.Color;

/// Name of the benchmark
name: []const u8,
percs: Percentiles,
/// 75th, 99th and 99.5th percentiles of the durations. They represent the timings below
/// which 75%, 99% and 99.5% of the other measurements would lie, respectively, when timings
/// are sorted in increasing order.
percs: Benchmark.Percentiles,
/// The average (more precisely, the arithmetic mean) of the durations
avg_duration: usize,
/// The standard deviation of the durations (an estimate for the average *deviation* from
/// the average duration).
std_duration: usize,
/// The minimum among the durations
min_duration: usize,
/// The maximum among the durations
max_duration: usize,
/// The total number of benchmark runs performed
total_operations: usize,
/// Total time for all the runs of the benchmark combined
total_time: usize,

/// Formats and prints the benchmark result in a readable format.
pub fn prettyPrint(self: BenchmarkResult, header: bool) !void {
var total_time_buffer: [128]u8 = undefined;
const total_time_str = try format.duration(total_time_buffer[0..], self.total_time);
/// Pretty-prints the name of the benchmark
pub fn prettyPrintName(self: Self, writer: anytype, color: Color) !void {
try writer.print("{s}{s:<22}{s} ", .{ color.code(), self.name, Color.reset.code() });
}

var p75_buffer: [128]u8 = undefined;
const p75_str = try format.duration(p75_buffer[0..], self.percs.p75);
/// Pretty-prints the total number of operations (runs) performed by the benchmark
pub fn prettyPrintTotalOperations(self: Self, writer: anytype, color: Color) !void {
try writer.print("{s}{d:<8}{s} ", .{ color.code(), self.total_operations, Color.reset.code() });
}

var p99_buffer: [128]u8 = undefined;
const p99_str = try format.duration(p99_buffer[0..], self.percs.p99);
/// Pretty-prints the total time it took to perform all the runs
pub fn prettyPrintTotalTime(self: Self, writer: anytype, color: Color) !void {
var buffer: [128]u8 = undefined;
const str = try format.duration(buffer[0..], self.total_time);

var p995_buffer: [128]u8 = undefined;
const p995_str = try format.duration(p995_buffer[0..], self.percs.p995);
try writer.print("{s}{s:<14}{s} ", .{ color.code(), str, Color.reset.code() });
}

var avg_std_buffer: [128]u8 = undefined;
var avg_std_offset = (try format.duration(avg_std_buffer[0..], self.avg_duration)).len;
avg_std_offset += (try std.fmt.bufPrint(avg_std_buffer[avg_std_offset..], " ± ", .{})).len;
avg_std_offset += (try format.duration(avg_std_buffer[avg_std_offset..], self.std_duration)).len;
const avg_std_str = avg_std_buffer[0..avg_std_offset];
/// Pretty-prints the average (arithmetic mean) and the standard deviation of the durations
pub fn prettyPrintAvgStd(self: Self, writer: anytype, color: Color) !void {
var buffer: [128]u8 = undefined;
var avg_std_offset = (try format.duration(buffer[0..], self.avg_duration)).len;
avg_std_offset += (try std.fmt.bufPrint(buffer[avg_std_offset..], " ± ", .{})).len;
avg_std_offset += (try format.duration(buffer[avg_std_offset..], self.std_duration)).len;
const str = buffer[0..avg_std_offset];

try writer.print("{s}{s:<22}{s} ", .{ color.code(), str, Color.reset.code() });
}

/// Pretty-prints the minimum and maximum duration
pub fn prettyPrintMinMax(self: Self, writer: anytype, color: Color) !void {
var min_buffer: [128]u8 = undefined;
const min_str = try format.duration(min_buffer[0..], self.min_duration);

var max_buffer: [128]u8 = undefined;
const max_str = try format.duration(max_buffer[0..], self.max_duration);

var min_max_buffer: [128]u8 = undefined;
const min_max_str = try std.fmt.bufPrint(min_max_buffer[0..], "({s} ... {s})", .{ min_str, max_str });
var buffer: [128]u8 = undefined;
const str = try std.fmt.bufPrint(buffer[0..], "({s} ... {s})", .{ min_str, max_str });

if (header) try prettyPrintHeader();
try writer.print("{s}{s:<28}{s} ", .{ color.code(), str, Color.reset.code() });
}

const stdout = std.io.getStdOut().writer();
try stdout.print(
"{s:<22} \x1b[90m{d:<8} \x1b[90m{s:<14} \x1b[33m{s:<22} \x1b[95m{s:<28} \x1b[90m{s:<10} {s:<10} {s:<10}\x1b[0m\n\n",
.{ self.name, self.total_operations, total_time_str, avg_std_str, min_max_str, p75_str, p99_str, p995_str },
);
/// Pretty-prints the 75th, 99th and 99.5th percentiles of the durations
pub fn prettyPrintPercentiles(self: Self, writer: anytype, color: Color) !void {
var p75_buffer: [128]u8 = undefined;
const p75_str = try format.duration(p75_buffer[0..], self.percs.p75);

var p99_buffer: [128]u8 = undefined;
const p99_str = try format.duration(p99_buffer[0..], self.percs.p99);

var p995_buffer: [128]u8 = undefined;
const p995_str = try format.duration(p995_buffer[0..], self.percs.p995);

try writer.print("{s}{s:<10} {s:<10} {s:<10}{s} ", .{ color.code(), p75_str, p99_str, p995_str, Color.reset.code() });
}

/// Formats and prints the benchmark result in a readable format.
pub fn prettyPrint(self: Self, writer: anytype, header: bool) !void {
if (header) try prettyPrintHeader(writer);

try self.prettyPrintName(writer, Color.none);
try self.prettyPrintTotalOperations(writer, Color.cyan);
try self.prettyPrintTotalTime(writer, Color.cyan);
try self.prettyPrintAvgStd(writer, Color.green);
try self.prettyPrintMinMax(writer, Color.blue);
try self.prettyPrintPercentiles(writer, Color.cyan);
_ = try writer.write("\n");
}
};

pub fn prettyPrintHeader() !void {
const stdout = std.io.getStdOut().writer();
try stdout.print(
/// Pretty-prints the header for the result pretty-print table
pub fn prettyPrintHeader(writer: anytype) !void {
try writer.print(
"\n{s:<22} {s:<8} {s:<14} {s:<22} {s:<28} {s:<10} {s:<10} {s:<10}\n",
.{ "benchmark", "runs", "total time", "time/run (avg ± σ)", "(min ... max)", "p75", "p99", "p995" },
);
try stdout.print("-----------------------------------------------------------------------------------------------------------------------------\n", .{});
try writer.print("-----------------------------------------------------------------------------------------------------------------------------\n", .{});
}

pub const Percentiles = struct {
p75: u64,
p99: u64,
p995: u64,
};

/// BenchmarkResults acts as a container for multiple benchmark results.
/// It provides functionality to format and print these results.
pub const BenchmarkResults = struct {
const Color = c.Color;

/// A dynamic list of BenchmarkResult objects.
results: std.ArrayList(BenchmarkResult),
/// A handle to a buffered stdout-writer
out_stream: std.io.BufferedWriter(1024, @TypeOf(std.io.getStdOut().writer())) = .{ .unbuffered_writer = std.io.getStdOut().writer() },

/// Determines the color representation based on the duration of the benchmark.
/// duration: The duration to evaluate.
pub fn getColor(self: *const BenchmarkResults, duration: u64) c.Color {
const max_duration = @max(self.results.items[0].duration, self.results.items[self.results.items.len - 1].duration);
const min_duration = @min(self.results.items[0].duration, self.results.items[self.results.items.len - 1].duration);
/// Determines the color representation based on the total-time of the benchmark.
/// total_time: The total-time to evaluate.
pub fn getColor(self: *const BenchmarkResults, total_time: u64) Color {
const max_total_time = @max(self.results.items[0].total_time, self.results.items[self.results.items.len - 1].total_time);
const min_total_time = @min(self.results.items[0].total_time, self.results.items[self.results.items.len - 1].total_time);

if (duration <= min_duration) return c.Color.green;
if (duration >= max_duration) return c.Color.red;
if (total_time <= min_total_time) return Color.green;
if (total_time >= max_total_time) return Color.red;

const prop = (duration - min_duration) * 100 / (max_duration - min_duration + 1);
const prop = (total_time - min_total_time) * 100 / (max_total_time - min_total_time + 1);

if (prop < 50) return c.Color.green;
if (prop < 75) return c.Color.yellow;
if (prop < 50) return Color.green;
if (prop < 75) return Color.yellow;

return c.Color.red;
return Color.red;
}

/// Formats and prints the benchmark results in a readable format.
pub fn prettyPrint(self: BenchmarkResults) !void {
try prettyPrintHeader();
pub fn prettyPrint(self: *BenchmarkResults) !void {
var writer = self.out_stream.writer();
try prettyPrintHeader(writer);
for (self.results.items) |result| {
try result.prettyPrint(false);
try result.prettyPrintName(writer, Color.none);
try result.prettyPrintTotalOperations(writer, Color.cyan);
try result.prettyPrintTotalTime(writer, self.getColor(result.total_time));
try result.prettyPrintAvgStd(writer, Color.green);
try result.prettyPrintMinMax(writer, Color.blue);
try result.prettyPrintPercentiles(writer, Color.cyan);
_ = try writer.write("\n");
}

try self.out_stream.flush();
}
};
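
A minimal sketch of how BenchmarkResults ties the pieces together after this
commit: rows print through the buffered stdout writer, the total-time column
is color-coded via getColor, and the buffer is flushed at the end (the
allocator and the already-computed result are assumptions for illustration):

const std = @import("std");
const zbench = @import("zbench.zig");

fn printAll(allocator: std.mem.Allocator, a_result: zbench.BenchmarkResult) !void {
    // `out_stream` takes its default value: a buffered writer over stdout.
    var results = zbench.BenchmarkResults{
        .results = std.ArrayList(zbench.BenchmarkResult).init(allocator),
    };
    defer results.results.deinit();

    try results.results.append(a_result);

    // Prints the header plus one color-coded row per result, then flushes.
    try results.prettyPrint();
}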

