Skip to content

Commit

Permalink
Running benchmarks with sequencer locally (#1629)
Browse files Browse the repository at this point in the history
Closes #1628
#1695
<!-- These comments should help create a useful PR message, please
delete any remaining comments before opening the PR. -->
<!-- If there is no issue number make sure to describe clearly *why*
this PR is necessary. -->
<!-- Mention open questions, remaining TODOs, if any -->

### This PR:
- makes benchmarks runnable with the sequencer locally
- saves the metrics of each run into a file
<!-- Describe what this PR adds to this repo and why -->
<!-- E.g. -->
<!-- * Implements feature 1 -->
<!-- * Fixes bug 3 -->

### This PR does not:
- parameterize the benchmark parameters for the sequencer, such as
`start_round` and `end_round`; they are hard-coded for now. This will be
designed later.
<!-- Describe what is out of scope for this PR, if applicable. Leave
this section blank if it's not applicable -->
<!-- This section helps avoid the reviewer having to needlessly point
out missing parts -->
<!-- * Implement feature 3 because that feature is blocked by Issue 4
-->
<!-- * Implement xyz because that is tracked in issue #123. -->
<!-- * Address xzy for which I opened issue #456 -->

### Key places to review:
<!-- Describe key places for reviewers to pay close attention to -->
<!-- * file.rs, `add_integers` function -->
<!-- Or directly comment on those files/lines to make it easier for the
reviewers -->

### How to test this PR: 
Create `results.csv` under `scripts/benchmarks_results`, then run `just
demo-native-benchmark` to test it.
<!-- Optional, uncomment the above line if this is relevant to your PR
-->
<!-- If your PR is fully tested through CI there is no need to add this
section -->
<!-- * E.g. `just test` -->

<!-- ### Things tested -->
<!-- Anything that was manually tested (that is not tested in CI). -->
<!-- E.g. building/running of docker containers. Changes to docker demo,
... -->
<!-- Especially mention anything untested, with reasoning and link an
issue to resolve this. -->

<!-- Complete the following items before creating this PR -->
<!-- [ ] Issue linked or PR description mentions why this change is
necessary. -->
<!-- [ ] PR description is clear enough for reviewers. -->
<!-- [ ] Documentation for changes (additions) has been updated (added).
-->
<!-- [ ] If this is a draft it is marked as "draft".  -->

<!-- To make changes to this template edit
https://github.com/EspressoSystems/.github/blob/main/PULL_REQUEST_TEMPLATE.md
-->
  • Loading branch information
dailinsubjam authored Jul 17, 2024
2 parents c1d3530 + b3cf1b5 commit 0331a90
Show file tree
Hide file tree
Showing 9 changed files with 185 additions and 0 deletions.
4 changes: 4 additions & 0 deletions .env
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,10 @@ ESPRESSO_SUBMIT_TRANSACTIONS_DELAY=2s
ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT=24010
ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT=24020

# Benchmarks
ESPRESSO_BENCH_START_BLOCK=50
ESPRESSO_BENCH_END_BLOCK=150

# Query service fetch requests rate limit
ESPRESSO_SEQUENCER_FETCH_RATE_LIMIT=25

Expand Down
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -446,8 +446,11 @@ services:
ports:
- "$ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT:8080"
environment:
- ESPRESSO_ORCHESTRATOR_NUM_NODES
- ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080
- ESPRESSO_SUBMIT_TRANSACTIONS_DELAY
- ESPRESSO_BENCH_START_BLOCK
- ESPRESSO_BENCH_END_BLOCK
- ESPRESSO_SEQUENCER_URL
- RUST_LOG
- RUST_LOG_FORMAT
Expand All @@ -465,9 +468,12 @@ services:
ports:
- "$ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT:8080"
environment:
- ESPRESSO_ORCHESTRATOR_NUM_NODES
- ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080
- ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL=http://permissionless-builder:$ESPRESSO_BUILDER_SERVER_PORT/txn_submit
- ESPRESSO_SUBMIT_TRANSACTIONS_DELAY
- ESPRESSO_BENCH_START_BLOCK
- ESPRESSO_BENCH_END_BLOCK
- ESPRESSO_SEQUENCER_URL
- RUST_LOG
- RUST_LOG_FORMAT
Expand Down
4 changes: 4 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ demo-native:
cargo build --release
scripts/demo-native

demo-native-benchmark:
cargo build --release --features benchmarking
scripts/demo-native

down *args:
docker compose down {{args}}

Expand Down
26 changes: 26 additions & 0 deletions scripts/benchmarks_results/upload_results.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
total_nodes,da_committee_size,block_range,transaction_size_range,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_bytes,total_time_elapsed_in_sec
5,5,20~120,1~1000,1~20,public,1,0,4,209,22,503,53
total_nodes,da_committee_size,block_range,transaction_size_range,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_bytes,total_time_elapsed_in_sec
5,5,20~120,1~1000,1~20,private,3,0,6,1267,115,584,53
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~20,public,1,0,2,260,23,566,50
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~20,private,2,0,2,1277,118,541,50
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~10,public,1,0,1,242,15,695,43
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~10,private,1,0,2,852,63,581,43
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~20,private,2,0,2,990,78,571,45
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~20,public,1,0,2,278,21,597,45
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~40,private,5,0,9,1758,255,565,82
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~40,public,4,0,9,215,33,536,82
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~50,private,11,0,11,54,18,616,204
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~1,private,1,0,1,227,14,665,41
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec
5,5,50~150,1~1000,1~1,public,1,0,1,293,20,601,41
2 changes: 2 additions & 0 deletions sequencer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ edition = "2021"
default = ["libp2p"]
testing = ["hotshot-testing"]
libp2p = []
benchmarking = []

[[bin]]
name = "espresso-dev-node"
Expand Down Expand Up @@ -55,6 +56,7 @@ clap = { workspace = true }
cld = { workspace = true }
committable = "0.2"
contract-bindings = { path = "../contract-bindings" }
csv = "1"
derivative = "2.2"
derive_more = { workspace = true }
dotenvy = { workspace = true }
Expand Down
2 changes: 2 additions & 0 deletions sequencer/api/public-env-vars.toml
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,8 @@ variables = [
"ESPRESSO_SUBMIT_TRANSACTIONS_PORT",
"ESPRESSO_SUBMIT_TRANSACTIONS_SLOW_TRANSACTION_WARNING_THRESHOLD",
"ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL",
"ESPRESSO_BENCH_START_BLOCK",
"ESPRESSO_BENCH_END_BLOCK",
"FROM",
"TO"
]
138 changes: 138 additions & 0 deletions sequencer/src/bin/submit-transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,13 @@ use surf_disco::{Client, Url};
use tide_disco::{error::ServerError, App};
use vbs::version::StaticVersionType;

#[cfg(feature = "benchmarking")]
use csv::Writer;
#[cfg(feature = "benchmarking")]
use std::fs::OpenOptions;
#[cfg(feature = "benchmarking")]
use std::num::NonZeroUsize;

/// Submit random transactions to an Espresso Sequencer.
#[derive(Clone, Debug, Parser)]
struct Options {
Expand Down Expand Up @@ -123,6 +130,21 @@ struct Options {
/// URL of the query service.
#[clap(env = "ESPRESSO_SEQUENCER_URL")]
url: Url,

/// Relay num_nodes for benchmark results output
#[cfg(feature = "benchmarking")]
#[clap(short, long, env = "ESPRESSO_ORCHESTRATOR_NUM_NODES")]
num_nodes: NonZeroUsize,

/// The first block that benchmark starts counting in
#[cfg(feature = "benchmarking")]
#[clap(short, long, env = "ESPRESSO_BENCH_START_BLOCK")]
benchmark_start_block: NonZeroUsize,

/// The final block that benchmark counts in
#[cfg(feature = "benchmarking")]
#[clap(short, long, env = "ESPRESSO_BENCH_END_BLOCK")]
benchmark_end_block: NonZeroUsize,
}

impl Options {
Expand Down Expand Up @@ -179,6 +201,27 @@ async fn main() {
let mut pending = HashMap::new();
let mut total_latency = Duration::default();
let mut total_transactions = 0;

// Keep track of the latency after warm up for benchmarking
#[cfg(feature = "benchmarking")]
let mut num_block = 0;
#[cfg(feature = "benchmarking")]
let mut benchmark_total_latency = Duration::default();
#[cfg(feature = "benchmarking")]
let mut benchmark_minimum_latency = Duration::default();
#[cfg(feature = "benchmarking")]
let mut benchmark_maximum_latency = Duration::default();
#[cfg(feature = "benchmarking")]
let mut benchmark_total_transactions = 0;
#[cfg(feature = "benchmarking")]
let mut benchmark_finish = false;
#[cfg(feature = "benchmarking")]
let mut total_throughput = 0;
#[cfg(feature = "benchmarking")]
let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true
#[cfg(feature = "benchmarking")]
let mut has_started: bool = false;

while let Some(block) = blocks.next().await {
let block: BlockQueryData<SeqTypes> = match block {
Ok(block) => block,
Expand All @@ -189,6 +232,14 @@ async fn main() {
};
let received_at = Instant::now();
tracing::debug!("got block {}", block.height());
#[cfg(feature = "benchmarking")]
{
num_block += 1;
if !has_started && (num_block as usize) >= opt.benchmark_start_block.into() {
has_started = true;
start = Instant::now();
}
}

// Get all transactions which were submitted before this block.
while let Ok(Some(tx)) = receiver.try_next() {
Expand All @@ -207,9 +258,96 @@ async fn main() {
total_latency += latency;
total_transactions += 1;
tracing::info!("average latency: {:?}", total_latency / total_transactions);
#[cfg(feature = "benchmarking")]
{
if has_started && !benchmark_finish {
benchmark_minimum_latency = if total_transactions == 0 {
latency
} else {
std::cmp::min(benchmark_minimum_latency, latency)
};
benchmark_maximum_latency = if total_transactions == 0 {
latency
} else {
std::cmp::max(benchmark_maximum_latency, latency)
};

benchmark_total_latency += latency;
benchmark_total_transactions += 1;
// Transaction = NamespaceId(u64) + payload(Vec<u8>)
let payload_length = tx.into_payload().len();
let tx_sz = payload_length * std::mem::size_of::<u8>() // size of payload
+ std::mem::size_of::<u64>() // size of the namespace
+ std::mem::size_of::<Transaction>(); // size of the struct wrapper
total_throughput += tx_sz;
}
}
}
}

#[cfg(feature = "benchmarking")]
if !benchmark_finish && (num_block as usize) >= opt.benchmark_end_block.into() {
let block_range = format!("{}~{}", opt.benchmark_start_block, opt.benchmark_end_block,);
let transaction_size_range_in_bytes = format!("{}~{}", opt.min_size, opt.max_size,);
let transactions_per_batch_range = format!(
"{}~{}",
(opt.jobs as u64 * opt.min_batch_size),
(opt.jobs as u64 * opt.max_batch_size),
);
let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions;
let avg_transaction_size = total_throughput as u32 / benchmark_total_transactions;
let total_time_elapsed_in_sec = start.elapsed(); // in seconds
let avg_throughput_bytes_per_sec = (total_throughput as u64)
/ std::cmp::max(total_time_elapsed_in_sec.as_secs(), 1u64);
// Open the CSV file in append mode
let results_csv_file = OpenOptions::new()
.create(true)
.append(true) // Open in append mode
.open("scripts/benchmarks_results/results.csv")
.unwrap();
// Open a file for writing
let mut wtr = Writer::from_writer(results_csv_file);
let mut pub_or_priv_pool = "private";
if opt.use_public_mempool() {
pub_or_priv_pool = "public";
}
let _ = wtr.write_record([
"total_nodes",
"da_committee_size",
"block_range",
"transaction_size_range_in_bytes",
"transaction_per_batch_range",
"pub_or_priv_pool",
"avg_latency_in_sec",
"minimum_latency_in_sec",
"maximum_latency_in_sec",
"avg_throughput_bytes_per_sec",
"total_transactions",
"avg_transaction_size_in_bytes",
"total_time_elapsed_in_sec",
]);
let _ = wtr.write_record(&[
opt.num_nodes.to_string(),
opt.num_nodes.to_string(),
block_range,
transaction_size_range_in_bytes,
transactions_per_batch_range,
pub_or_priv_pool.to_string(),
benchmark_average_latency.as_secs().to_string(),
benchmark_minimum_latency.as_secs().to_string(),
benchmark_maximum_latency.as_secs().to_string(),
avg_throughput_bytes_per_sec.to_string(),
benchmark_total_transactions.to_string(),
avg_transaction_size.to_string(),
total_time_elapsed_in_sec.as_secs().to_string(),
]);
let _ = wtr.flush();
println!(
"Latency results successfully saved in scripts/benchmarks_results/results.csv"
);
benchmark_finish = true;
}

// If a lot of transactions are pending, it might indicate the sequencer is struggling to
// finalize them. We should warn about this.
if pending.len() >= opt.pending_transactions_warning_threshold {
Expand Down
2 changes: 2 additions & 0 deletions sequencer/src/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,8 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp
orchestrator_client
.wait_for_all_nodes_ready(self.node_state.node_id)
.await;
} else {
tracing::error!("Cannot get info from orchestrator client");
}
tracing::warn!("starting consensus");
self.handle.read().await.hotshot.start_consensus().await;
Expand Down

0 comments on commit 0331a90

Please sign in to comment.