From 3a90abaee896273a2c34923e9efaca5dea54dfd1 Mon Sep 17 00:00:00 2001
From: Jeremy
Date: Fri, 26 Jul 2024 09:32:56 +0800
Subject: [PATCH] Send large tx in espresso-dev-node (#1772)

* Send large tx in espresso-dev-node

* Skip the transaction that exceeds the block size limit

* Increase the default max block size
---
 data/messages.bin                             | Bin 7396 -> 7396 bytes
 data/messages.json                            |   2 +-
 sequencer/src/bin/espresso-dev-node.rs        |  76 ++++++++++++++++++
 sequencer/src/lib.rs                          |   8 +-
 .../v0/impls/block/full_payload/payload.rs    |  16 +++-
 types/src/v0/impls/chain_config.rs            |   2 +-
 6 files changed, 96 insertions(+), 8 deletions(-)

diff --git a/data/messages.bin b/data/messages.bin
index a3962ad3d40a122db09173415e8f0f2b582daf62..046ea94dba83eef681a9a2950289d23e78c81e43 100644
GIT binary patch
delta 12
TcmaE2`NVR

diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs
--- a/sequencer/src/bin/espresso-dev-node.rs
+++ b/sequencer/src/bin/espresso-dev-node.rs
@@ ... @@ anyhow::Result<()> {
         .state_relay_url(relay_server_url.clone())
         .l1_url(url.clone())
         .build();
+    const NUM_NODES: usize = 2;
     let config = TestNetworkConfigBuilder::<NUM_NODES, _, _>::with_num_nodes()
         .api_config(api_options)
@@ -438,6 +439,81 @@ mod tests {
             .await;
         }
 
+        let large_tx = Transaction::new(100_u32.into(), vec![0; 20000]);
+        let large_hash: Commitment<Transaction> = api_client
+            .post("submit/submit")
+            .body_json(&large_tx)
+            .unwrap()
+            .send()
+            .await
+            .unwrap();
+
+        let tx_hash = large_tx.commit();
+        assert_eq!(large_hash, tx_hash);
+
+        let mut tx_result = api_client
+            .get::<TransactionQueryData<SeqTypes>>(&format!(
+                "availability/transaction/hash/{tx_hash}",
+            ))
+            .send()
+            .await;
+        while tx_result.is_err() {
+            tracing::info!("waiting for large tx");
+            sleep(Duration::from_secs(3)).await;
+
+            tx_result = api_client
+                .get::<TransactionQueryData<SeqTypes>>(&format!(
+                    "availability/transaction/hash/{}",
+                    tx_hash
+                ))
+                .send()
+                .await;
+        }
+
+        // The `submit/submit` endpoint now allows extremely large transactions into the mempool,
+        // so we need to check that such a transaction does not block the building process.
+        // Currently the default value of `max_block_size` is 30720, and this transaction exceeds that limit.
+        // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/1777
+        {
+            let extremely_large_tx = Transaction::new(100_u32.into(), vec![0; 50120]);
+            let extremely_large_hash: Commitment<Transaction> = api_client
+                .post("submit/submit")
+                .body_json(&extremely_large_tx)
+                .unwrap()
+                .send()
+                .await
+                .unwrap();
+            assert_eq!(extremely_large_tx.commit(), extremely_large_hash);
+
+            // Now we send a small transaction to make sure it can still be included in a HotShot block.
+            let tx = Transaction::new(100_u32.into(), vec![0; 3]);
+            let tx_hash: Commitment<Transaction> = api_client
+                .post("submit/submit")
+                .body_json(&tx)
+                .unwrap()
+                .send()
+                .await
+                .unwrap();
+
+            let mut result = api_client
+                .get::<TransactionQueryData<SeqTypes>>(&format!(
+                    "availability/transaction/hash/{tx_hash}",
+                ))
+                .send()
+                .await;
+            while result.is_err() {
+                sleep(Duration::from_secs(3)).await;
+
+                result = api_client
+                    .get::<TransactionQueryData<SeqTypes>>(&format!(
+                        "availability/transaction/hash/{}",
+                        tx_hash
+                    ))
+                    .send()
+                    .await;
+            }
+        }
+
         let tx_block_height = tx_result.unwrap().block_height();
 
         let light_client_address = "0xdc64a140aa3e981100a9beca4e685f962f0cf6c9";
diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs
index 346db3b09..1793ee6eb 100644
--- a/sequencer/src/lib.rs
+++ b/sequencer/src/lib.rs
@@ -376,7 +376,7 @@ pub mod testing {
         eth_signature_key::EthKeyPair,
         mock::MockStateCatchup,
         v0::traits::{PersistenceOptions, StateCatchup},
-        ChainConfig, Event, FeeAccount, PubKey, SeqTypes, Transaction, Upgrade,
+        Event, FeeAccount, PubKey, SeqTypes, Transaction, Upgrade,
     };
     use futures::{
         future::join_all,
@@ -420,7 +420,9 @@ pub mod testing {
             (
                 <SimpleBuilderImplementation as TestBuilderImplementation<SeqTypes>>::start(
                     NUM_NODES,
-                    url.clone(),
+                    format!("http://0.0.0.0:{port}")
+                        .parse()
+                        .expect("Failed to parse builder listener"),
                     (),
                     HashMap::new(),
                 )
@@ -664,7 +666,7 @@ pub mod testing {
             state.prefund_account(builder_account, U256::max_value().into());
             let node_state = NodeState::new(
                 i as u64,
-                ChainConfig::default(),
+                state.chain_config.resolve().unwrap_or_default(),
                 L1Client::new(self.l1_url.clone(), 1000),
                 catchup::local_and_remote(persistence_opt.clone(), catchup).await,
             )
diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs
index bbb75399a..4d705a045 100644
--- a/types/src/v0/impls/block/full_payload/payload.rs
+++ b/types/src/v0/impls/block/full_payload/payload.rs
@@ -68,13 +68,23 @@ impl Payload {
         // add each tx to its namespace
         let mut ns_builders = BTreeMap::<NamespaceId, NsPayloadBuilder>::new();
         for tx in transactions.into_iter() {
-            // accounting for block byte length limit
-            block_byte_len += tx.payload().len() + NsPayloadBuilder::tx_table_entry_byte_len();
+            let mut tx_size = tx.payload().len() + NsPayloadBuilder::tx_table_entry_byte_len();
             if !ns_builders.contains_key(&tx.namespace()) {
                 // each new namespace adds overhead
-                block_byte_len +=
+                tx_size +=
                     NsTableBuilder::entry_byte_len() + NsPayloadBuilder::tx_table_header_byte_len();
             }
+
+            if tx_size > max_block_byte_len {
+                // skip this transaction since it exceeds the block size limit
+                tracing::warn!(
+                    "skip the transaction to fit in maximum block byte length {max_block_byte_len}, transaction size {tx_size}"
+                );
+                continue;
+            }
+
+            // accounting for block byte length limit
+            block_byte_len += tx_size;
             if block_byte_len > max_block_byte_len {
                 tracing::warn!("transactions truncated to fit in maximum block byte length {max_block_byte_len}");
                 break;
diff --git a/types/src/v0/impls/chain_config.rs b/types/src/v0/impls/chain_config.rs
index 754b2db5b..1593dbf45 100644
--- a/types/src/v0/impls/chain_config.rs
+++ b/types/src/v0/impls/chain_config.rs
@@ -81,7 +81,7 @@ impl Default for ChainConfig {
     fn default() -> Self {
         Self {
             chain_id: U256::from(35353).into(), // arbitrarily chosen chain ID
-            max_block_size: 10240.into(),
+            max_block_size: 30720.into(),
            base_fee: 0.into(),
             fee_contract: None,
             fee_recipient: Default::default(),
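
Note (not part of the patch): the payload.rs hunk can be read as the standalone sketch below. The names pack_block, SketchTx, TX_TABLE_ENTRY_BYTES and NS_OVERHEAD_BYTES are made up for illustration and only stand in for the real NsPayloadBuilder / NsTableBuilder accounting; the point is the new control flow, where a transaction whose own footprint exceeds max_block_byte_len is skipped with continue instead of tripping the existing truncation branch and stalling block production behind it.

use std::collections::BTreeSet;

// Assumed, simplified overhead constants; the real values come from
// NsPayloadBuilder / NsTableBuilder in espresso-types.
const TX_TABLE_ENTRY_BYTES: usize = 4;
const NS_OVERHEAD_BYTES: usize = 12;

// Simplified stand-in for a namespaced transaction.
struct SketchTx {
    namespace: u64,
    payload: Vec<u8>,
}

// Pack transactions into a block: skip any single transaction whose own footprint
// already exceeds the limit (the new behaviour), and truncate once the running
// total would overflow it (the pre-existing behaviour).
fn pack_block(txs: Vec<SketchTx>, max_block_byte_len: usize) -> Vec<SketchTx> {
    let mut seen_namespaces = BTreeSet::new();
    let mut block_byte_len = 0usize;
    let mut included = Vec::new();

    for tx in txs {
        // footprint: payload bytes plus a tx-table entry, plus namespace overhead
        // if this namespace has not been seen in the block yet
        let mut tx_size = tx.payload.len() + TX_TABLE_ENTRY_BYTES;
        if !seen_namespaces.contains(&tx.namespace) {
            tx_size += NS_OVERHEAD_BYTES;
        }

        if tx_size > max_block_byte_len {
            // an oversized transaction is skipped instead of stalling the builder
            eprintln!("skipping oversized tx: {tx_size} > {max_block_byte_len}");
            continue;
        }

        block_byte_len += tx_size;
        if block_byte_len > max_block_byte_len {
            // no more room: truncate the rest of the queue
            break;
        }

        seen_namespaces.insert(tx.namespace);
        included.push(tx);
    }
    included
}

fn main() {
    let txs = vec![
        SketchTx { namespace: 100, payload: vec![0; 20_000] },
        SketchTx { namespace: 100, payload: vec![0; 50_120] }, // exceeds the 30_720 limit
        SketchTx { namespace: 100, payload: vec![0; 3] },
    ];
    // With the 30_720-byte default from chain_config.rs, only the oversized tx is dropped.
    assert_eq!(pack_block(txs, 30_720).len(), 2);
}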
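
Note (not part of the patch): the dev-node test waits for inclusion by repeatedly querying availability/transaction/hash/{hash} and sleeping while the query still errors. A generic form of that wait loop is sketched below; the helper name wait_until_ok, the tokio runtime, and the simulated endpoint are assumptions for the sketch, not code from this repository.

use std::time::Duration;
use tokio::time::sleep;

// Keep calling `query` until its future resolves to Ok, sleeping between attempts.
async fn wait_until_ok<T, E, F, Fut>(mut query: F, interval: Duration) -> T
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    loop {
        match query().await {
            Ok(value) => return value,
            Err(_) => sleep(interval).await, // not available yet, try again
        }
    }
}

#[tokio::main]
async fn main() {
    // Simulated endpoint that only succeeds on the third attempt, standing in for the
    // test's api_client.get(..) call against availability/transaction/hash/{hash}.
    let mut attempts = 0u32;
    let block_height = wait_until_ok(
        || {
            attempts += 1;
            let ready = attempts >= 3;
            async move {
                if ready {
                    Ok(42u64)
                } else {
                    Err("not found yet")
                }
            }
        },
        Duration::from_millis(10),
    )
    .await;
    assert_eq!(block_height, 42);
}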