Skip to content

Commit

Permalink
Send large tx in espresso-dev-node (#1772)
Browse files Browse the repository at this point in the history
* Send large tx in espresso-dev-node

* Skip the transaction that exceeds the block size limit

* Increase the default max block size
  • Loading branch information
ImJeremyHe authored Jul 26, 2024
1 parent a31c0bc commit 3a90aba
Show file tree
Hide file tree
Showing 6 changed files with 96 additions and 8 deletions.
Binary file modified data/messages.bin
Binary file not shown.
2 changes: 1 addition & 1 deletion data/messages.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
"chain_id": "35353",
"fee_contract": null,
"fee_recipient": "0x0000000000000000000000000000000000000000",
"max_block_size": "10240"
"max_block_size": "30720"
}
}
},
Expand Down
76 changes: 76 additions & 0 deletions sequencer/src/bin/espresso-dev-node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ async fn main() -> anyhow::Result<()> {
.state_relay_url(relay_server_url.clone())
.l1_url(url.clone())
.build();

const NUM_NODES: usize = 2;
let config = TestNetworkConfigBuilder::<NUM_NODES, _, _>::with_num_nodes()
.api_config(api_options)
Expand Down Expand Up @@ -438,6 +439,81 @@ mod tests {
.await;
}

let large_tx = Transaction::new(100_u32.into(), vec![0; 20000]);
let large_hash: Commitment<Transaction> = api_client
.post("submit/submit")
.body_json(&large_tx)
.unwrap()
.send()
.await
.unwrap();

let tx_hash = large_tx.commit();
assert_eq!(large_hash, tx_hash);

let mut tx_result = api_client
.get::<TransactionQueryData<SeqTypes>>(&format!(
"availability/transaction/hash/{tx_hash}",
))
.send()
.await;
while tx_result.is_err() {
tracing::info!("waiting for large tx");
sleep(Duration::from_secs(3)).await;

tx_result = api_client
.get::<TransactionQueryData<SeqTypes>>(&format!(
"availability/transaction/hash/{}",
tx_hash
))
.send()
.await;
}

        // Now the `submit/submit` endpoint accepts extremely large transactions into the mempool.
// And we need to check whether this extremely large transaction blocks the building process.
// Currently the default value of `max_block_size` is 30720, and this transaction exceeds the limit.
// TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/1777
{
let extremely_large_tx = Transaction::new(100_u32.into(), vec![0; 50120]);
let extremely_large_hash: Commitment<Transaction> = api_client
.post("submit/submit")
.body_json(&extremely_large_tx)
.unwrap()
.send()
.await
.unwrap();
assert_eq!(extremely_large_tx.commit(), extremely_large_hash);

// Now we send a small transaction to make sure this transaction can be included in a hotshot block.
let tx = Transaction::new(100_u32.into(), vec![0; 3]);
let tx_hash: Commitment<Transaction> = api_client
.post("submit/submit")
.body_json(&tx)
.unwrap()
.send()
.await
.unwrap();

let mut result = api_client
.get::<TransactionQueryData<SeqTypes>>(&format!(
"availability/transaction/hash/{tx_hash}",
))
.send()
.await;
while result.is_err() {
sleep(Duration::from_secs(3)).await;

result = api_client
.get::<TransactionQueryData<SeqTypes>>(&format!(
"availability/transaction/hash/{}",
tx_hash
))
.send()
.await;
}
}

let tx_block_height = tx_result.unwrap().block_height();

let light_client_address = "0xdc64a140aa3e981100a9beca4e685f962f0cf6c9";
Expand Down
8 changes: 5 additions & 3 deletions sequencer/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -376,7 +376,7 @@ pub mod testing {
eth_signature_key::EthKeyPair,
mock::MockStateCatchup,
v0::traits::{PersistenceOptions, StateCatchup},
ChainConfig, Event, FeeAccount, PubKey, SeqTypes, Transaction, Upgrade,
Event, FeeAccount, PubKey, SeqTypes, Transaction, Upgrade,
};
use futures::{
future::join_all,
Expand Down Expand Up @@ -420,7 +420,9 @@ pub mod testing {
(
<SimpleBuilderImplementation as TestBuilderImplementation<SeqTypes>>::start(
NUM_NODES,
url.clone(),
format!("http://0.0.0.0:{port}")
.parse()
.expect("Failed to parse builder listener"),
(),
HashMap::new(),
)
Expand Down Expand Up @@ -664,7 +666,7 @@ pub mod testing {
state.prefund_account(builder_account, U256::max_value().into());
let node_state = NodeState::new(
i as u64,
ChainConfig::default(),
state.chain_config.resolve().unwrap_or_default(),
L1Client::new(self.l1_url.clone(), 1000),
catchup::local_and_remote(persistence_opt.clone(), catchup).await,
)
Expand Down
16 changes: 13 additions & 3 deletions types/src/v0/impls/block/full_payload/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,23 @@ impl Payload {
// add each tx to its namespace
let mut ns_builders = BTreeMap::<NamespaceId, NsPayloadBuilder>::new();
for tx in transactions.into_iter() {
// accounting for block byte length limit
block_byte_len += tx.payload().len() + NsPayloadBuilder::tx_table_entry_byte_len();
let mut tx_size = tx.payload().len() + NsPayloadBuilder::tx_table_entry_byte_len();
if !ns_builders.contains_key(&tx.namespace()) {
// each new namespace adds overhead
block_byte_len +=
tx_size +=
NsTableBuilder::entry_byte_len() + NsPayloadBuilder::tx_table_header_byte_len();
}

if tx_size > max_block_byte_len {
            // skip this transaction since it exceeds the block size limit
tracing::warn!(
"skip the transaction to fit in maximum block byte length {max_block_byte_len}, transaction size {tx_size}"
);
continue;
}

// accounting for block byte length limit
block_byte_len += tx_size;
if block_byte_len > max_block_byte_len {
tracing::warn!("transactions truncated to fit in maximum block byte length {max_block_byte_len}");
break;
Expand Down
2 changes: 1 addition & 1 deletion types/src/v0/impls/chain_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ impl Default for ChainConfig {
fn default() -> Self {
Self {
chain_id: U256::from(35353).into(), // arbitrarily chosen chain ID
max_block_size: 10240.into(),
max_block_size: 30720.into(),
base_fee: 0.into(),
fee_contract: None,
fee_recipient: Default::default(),
Expand Down

0 comments on commit 3a90aba

Please sign in to comment.