From d8627aec09db2196feda3662ca0d600d1364e123 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 15 Dec 2022 17:14:49 +0100 Subject: [PATCH 1/3] sc-network-test::Peer: block push methods return hashes vec This commit reworks the block generation/push methods in sc-network-test::Peer. Now the methods provide the vector of hashes that were built. This allows getting rid of the redundant `block_hash_from_id` call, as all hashes are known just after being built. A similar approach was taken in BeefyTestNet::generate_blocks_and_sync method. This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) --- bin/node/bench/src/import.rs | 9 +- client/beefy/src/tests.rs | 120 +++++++++++----------- client/beefy/src/worker.rs | 8 +- client/finality-grandpa/src/tests.rs | 97 +++++++++--------- client/network/test/src/lib.rs | 32 +++--- client/network/test/src/sync.rs | 142 ++++++++++++++------------- 6 files changed, 212 insertions(+), 196 deletions(-) diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 28a322834271c..7d59e94a44a09 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -34,7 +34,7 @@ use std::borrow::Cow; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; -use sc_client_api::{backend::Backend, HeaderBackend}; +use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; @@ -127,15 +127,10 @@ impl core::Benchmark for ImportBenchmark { context.import_block(self.block.clone()); let elapsed = start.elapsed(); - let hash = context - .client - .expect_block_hash_from_id(&BlockId::number(1)) - .expect("Block 1 was imported; qed"); - // Sanity checks. 
context .client - .state_at(hash) + .state_at(self.block.header.hash()) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index f6ab0dd1020f1..90d047a9ff014 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -151,6 +151,9 @@ impl BeefyTestNet { }); } + /// Builds the blocks and returns the vector of built block hashes. + /// Returned vector contains the genesis hash which allows for easy indexing (block number is + /// equal to index) pub(crate) fn generate_blocks_and_sync( &mut self, count: usize, @@ -158,8 +161,17 @@ impl BeefyTestNet { validator_set: &BeefyValidatorSet, include_mmr_digest: bool, runtime: &mut Runtime, - ) { - self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { + ) -> Vec { + let mut all_hashes = Vec::with_capacity(count + 1); + + // make sure genesis is the only block in network, so we can insert genesis at the beginning + // of hashes, otherwise indexing would be broken + assert!(self.peer(0).client().as_backend().blockchain().hash(1).unwrap().is_none()); + + // push genesis to make indexing human readable (index equals block number) + all_hashes.push(self.peer(0).client().info().genesis_hash); + + let mut built_hashes = self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; if include_mmr_digest { @@ -176,6 +188,9 @@ impl BeefyTestNet { block }); runtime.block_on(self.wait_until_sync()); + + all_hashes.append(&mut built_hashes); + all_hashes } } @@ -512,7 +527,7 @@ fn finalize_block_and_wait_for_beefy( // peer index and key peers: impl Iterator + Clone, runtime: &mut Runtime, - finalize_targets: &[u64], + finalize_targets: &[H256], expected_beefy: &[u64], ) { let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); for block in finalize_targets { 
peers.clone().for_each(|(index, _)| { let client = net.lock().peer(index).client().as_client(); - let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); - client.finalize_block(finalize, None).unwrap(); + client.finalize_block(*block, None).unwrap(); }) } @@ -554,7 +568,7 @@ fn beefy_finalizing_blocks() { runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 42 blocks including `AuthorityChange` digests every 10 blocks. - net.generate_blocks_and_sync(42, session_len, &validator_set, true, &mut runtime); + let hashes = net.generate_blocks_and_sync(42, session_len, &validator_set, true, &mut runtime); let net = Arc::new(Mutex::new(net)); @@ -562,19 +576,25 @@ fn beefy_finalizing_blocks() { let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[1, 5], &[1, 5]); + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &mut runtime, + &[hashes[1], hashes[5]], + &[1, 5], + ); // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[10], &[10]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[hashes[10]], &[10]); // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[18], &[14, 18]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[hashes[18]], &[14, 18]); // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[20], &[20]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[hashes[20]], &[20]); // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[21], 
&[]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[hashes[21]], &[]); } #[test] @@ -593,7 +613,7 @@ fn lagging_validators() { runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 62 blocks including `AuthorityChange` digests every 30 blocks. - net.generate_blocks_and_sync(62, session_len, &validator_set, true, &mut runtime); + let hashes = net.generate_blocks_and_sync(62, session_len, &validator_set, true, &mut runtime); let net = Arc::new(Mutex::new(net)); @@ -604,18 +624,12 @@ fn lagging_validators() { &net, peers.clone(), &mut runtime, - &[1, 15], + &[hashes[1], hashes[15]], &[1, 9, 13, 14, 15], ); // Alice finalizes #25, Bob lags behind - let finalize = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(25)) - .unwrap(); + let finalize = hashes[25]; let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -631,7 +645,13 @@ fn lagging_validators() { wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[30, 32], &[30, 31, 32]); + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &mut runtime, + &[hashes[30], hashes[32]], + &[30, 31, 32], + ); // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session @@ -639,13 +659,7 @@ fn lagging_validators() { // Alice finalizes session-boundary mandatory block #60, Bob lags behind let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - let finalize = net - .lock() - .peer(0) - .client() - 
.as_client() - .expect_block_hash_from_id(&BlockId::number(60)) - .unwrap(); + let finalize = hashes[60]; net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); @@ -687,24 +701,18 @@ fn correct_beefy_payload() { runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); // push 12 blocks - net.generate_blocks_and_sync(12, session_len, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(12, session_len, &validator_set, false, &mut runtime); let net = Arc::new(Mutex::new(net)); let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[1, 10], &[1, 9]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[hashes[1], hashes[10]], &[1, 9]); let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting - let hashof11 = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(11)) - .unwrap(); + let hashof11 = hashes[11]; net.lock().peer(0).client().as_client().finalize_block(hashof11, None).unwrap(); net.lock().peer(1).client().as_client().finalize_block(hashof11, None).unwrap(); net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); @@ -894,7 +902,7 @@ fn voter_initialization() { runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 26 blocks - net.generate_blocks_and_sync(26, session_len, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(26, session_len, &validator_set, false, &mut runtime); let net = Arc::new(Mutex::new(net)); // Finalize multiple blocks at once to get a burst of finality notifications right from start. 
@@ -904,7 +912,7 @@ fn voter_initialization() { &net, peers.into_iter().enumerate(), &mut runtime, - &[1, 6, 10, 17, 24, 26], + &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], &[1, 5, 10, 15, 20, 25], ); } @@ -937,7 +945,7 @@ fn on_demand_beefy_justification_sync() { let dave_index = 3; // push 30 blocks - net.generate_blocks_and_sync(30, session_len, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(30, session_len, &validator_set, false, &mut runtime); let fast_peers = fast_peers.into_iter().enumerate(); let net = Arc::new(Mutex::new(net)); @@ -947,7 +955,7 @@ fn on_demand_beefy_justification_sync() { &net, fast_peers.clone(), &mut runtime, - &[1, 6, 10, 17, 24], + &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24]], &[1, 5, 10, 15, 20], ); @@ -959,14 +967,13 @@ fn on_demand_beefy_justification_sync() { let (dave_best_blocks, _) = get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); let client = net.lock().peer(dave_index).client().as_client(); - let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); - client.finalize_block(hashof1, None).unwrap(); + client.finalize_block(hashes[1], None).unwrap(); // Give Dave task some cpu cycles to process the finality notification, run_for(Duration::from_millis(100), &net, &mut runtime); // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. // Have the other peers do some gossip so Dave finds out about their progress. - finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[25], &[25]); + finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[hashes[25]], &[25]); // Now verify Dave successfully finalized #1 (through on-demand justification request). 
wait_for_best_beefy_blocks(dave_best_blocks, &net, &mut runtime, &[1]); @@ -978,13 +985,13 @@ fn on_demand_beefy_justification_sync() { &net, [(dave_index, BeefyKeyring::Dave)].into_iter(), &mut runtime, - &[6, 10, 17, 24, 26], + &[hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], &[5, 10, 15, 20, 25], ); let all_peers = all_peers.into_iter().enumerate(); // Now that Dave has caught up, sanity check voting works for all of them. - finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[30], &[30]); + finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[hashes[30]], &[30]); } #[test] @@ -996,13 +1003,12 @@ fn should_initialize_voter_at_genesis() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications - let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // load persistent state - nothing in DB, should init at session boundary let persisted_state = voter_init_setup(&mut net, &mut finality).unwrap(); @@ -1042,13 +1048,12 @@ fn should_initialize_voter_when_last_final_is_session_boundary() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications 
- let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // import/append BEEFY justification for session boundary block 10 let commitment = Commitment { @@ -1060,9 +1065,8 @@ fn should_initialize_voter_when_last_final_is_session_boundary() { commitment, signatures: vec![None], }); - let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); backend - .append_justification(hashof10, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashes[10], (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // Test corner-case where session boundary == last beefy finalized, @@ -1103,13 +1107,12 @@ fn should_initialize_voter_at_latest_finalized() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime); let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications - let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // import/append BEEFY justification for block 12 let commitment = Commitment { @@ -1121,9 +1124,8 @@ fn should_initialize_voter_at_latest_finalized() { commitment, signatures: vec![None], }); - let hashof12 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); backend - .append_justification(hashof12, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashes[12], (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // Test 
initialization at last BEEFY finalized. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index bba3563a8f70e..bd0f383172a4f 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1383,10 +1383,10 @@ pub(crate) mod tests { // generate 2 blocks, try again expect success let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - net.peer(0).push_blocks(2, false); - // finalize 1 and 2 without justifications - let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); - let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); + let hashes = net.peer(0).push_blocks(2, false); + // finalize 1 and 2 without justifications (hashes does not contain genesis) + let hashof1 = hashes[0]; + let hashof2 = hashes[1]; backend.finalize_block(hashof1, None).unwrap(); backend.finalize_block(hashof2, None).unwrap(); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 6b577fd712930..4211736754261 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -649,14 +649,18 @@ fn sync_justifications_on_change_blocks() { net.peer(0).push_blocks(20, false); // at block 21 we do add a transition which is instant - let hashof21 = net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change( - &mut block, - ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, - ); - block - }); + let hashof21 = net + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + block + }) + .pop() + .unwrap(); // add more blocks on top of it (until we have 25) net.peer(0).push_blocks(4, false); 
@@ -1384,7 +1388,7 @@ fn grandpa_environment_respects_voting_rules() { let link = peer.data.lock().take().unwrap(); // add 21 blocks - peer.push_blocks(21, false); + let hashes = peer.push_blocks(21, false); // create an environment with no voting rule restrictions let unrestricted_env = test_environment(&link, None, network_service.clone(), ()); @@ -1437,12 +1441,7 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 19 with block 21 being the best block - let hashof19 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(19)) - .unwrap(); - peer.client().finalize_block(hashof19, None, false).unwrap(); + peer.client().finalize_block(hashes[18], None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1466,11 +1465,7 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 21 with block 21 being the best block - let hashof21 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(21)) - .unwrap(); + let hashof21 = hashes[20]; peer.client().finalize_block(hashof21, None, false).unwrap(); // even though the default environment will always try to not vote on the @@ -1789,20 +1784,23 @@ fn revert_prunes_authority_changes() { // Fork before revert point // add more blocks on top of block 23 (until we have 26) - let hash = peer.generate_blocks_at( - BlockId::Number(23), - 3, - BlockOrigin::File, - |builder| { - let mut block = builder.build().unwrap().block; - block.header.digest_mut().push(DigestItem::Other(vec![1])); - block - }, - false, - false, - true, - ForkChoiceStrategy::LongestChain, - ); + let hash = peer + .generate_blocks_at( + BlockId::Number(23), + 3, + BlockOrigin::File, + |builder| { + let mut block = builder.build().unwrap().block; + block.header.digest_mut().push(DigestItem::Other(vec![1])); + block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); // at block 27 of the fork add an 
authority transition peer.generate_blocks_at( BlockId::Hash(hash), @@ -1818,20 +1816,23 @@ fn revert_prunes_authority_changes() { // Fork after revert point // add more block on top of block 25 (until we have 28) - let hash = peer.generate_blocks_at( - BlockId::Number(25), - 3, - BlockOrigin::File, - |builder| { - let mut block = builder.build().unwrap().block; - block.header.digest_mut().push(DigestItem::Other(vec![2])); - block - }, - false, - false, - true, - ForkChoiceStrategy::LongestChain, - ); + let hash = peer + .generate_blocks_at( + BlockId::Number(25), + 3, + BlockOrigin::File, + |builder| { + let mut block = builder.build().unwrap().block; + block.header.digest_mut().push(DigestItem::Other(vec![2])); + block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); // at block 29 of the fork add an authority transition peer.generate_blocks_at( BlockId::Hash(hash), diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 173ca81653b1a..58ad1a6cd66c4 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -297,7 +297,12 @@ where } /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + pub fn generate_blocks( + &mut self, + count: usize, + origin: BlockOrigin, + edit_block: F, + ) -> Vec where F: FnMut( BlockBuilder, @@ -323,7 +328,7 @@ where origin: BlockOrigin, edit_block: F, fork_choice: ForkChoiceStrategy, - ) -> H256 + ) -> Vec where F: FnMut( BlockBuilder, @@ -354,12 +359,13 @@ where inform_sync_about_new_best_block: bool, announce_block: bool, fork_choice: ForkChoiceStrategy, - ) -> H256 + ) -> Vec where F: FnMut( BlockBuilder, ) -> Block, { + let mut hashes = Vec::with_capacity(count); let full_client = self.client.as_client(); let mut at = full_client.header(&at).unwrap().unwrap().hash(); for _ in 0..count { @@ -391,6 +397,7 @@ where if announce_block { 
self.network.service().announce_block(hash, None); } + hashes.push(hash); at = hash; } @@ -400,24 +407,24 @@ where *full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number(), ); } - at + hashes } /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 { + pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> Vec { let best_hash = self.client.info().best_hash; self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) } /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_headers(&mut self, count: usize) -> H256 { + pub fn push_headers(&mut self, count: usize) -> Vec { let best_hash = self.client.info().best_hash; self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true, true, true) } /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. - pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { + pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, true, true) } @@ -429,7 +436,7 @@ where count: usize, with_tx: bool, announce_block: bool, - ) -> H256 { + ) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, false, announce_block) } @@ -440,7 +447,7 @@ where at: BlockId, count: usize, with_tx: bool, - ) -> H256 { + ) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, true, false) } @@ -454,7 +461,7 @@ where headers_only: bool, inform_sync_about_new_best_block: bool, announce_block: bool, - ) -> H256 { + ) -> Vec { let mut nonce = 0; if with_tx { self.generate_blocks_at( @@ -491,7 +498,10 @@ where } } - pub fn push_authorities_change_block(&mut self, new_authorities: Vec) -> H256 { + pub fn push_authorities_change_block( + &mut self, + new_authorities: Vec, + ) -> Vec { self.generate_blocks(1, BlockOrigin::File, |mut builder| { 
builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); builder.build().unwrap().block diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index efe0e0577c11e..a1e6a6609de7e 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -255,13 +255,13 @@ fn sync_justifications() { sp_tracing::try_init_simple(); let runtime = Runtime::new().unwrap(); let mut net = JustificationTestNet::new(runtime.handle().clone(), 3); - net.peer(0).push_blocks(20, false); + let hashes = net.peer(0).push_blocks(20, false); runtime.block_on(net.wait_until_sync()); let backend = net.peer(0).client().as_backend(); - let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); - let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); - let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); + let hashof10 = hashes[9]; + let hashof15 = hashes[14]; + let hashof20 = hashes[19]; // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -285,13 +285,13 @@ fn sync_justifications() { runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(hash).unwrap() != + for height in (10..21).step_by(5) { + if net.peer(0).client().justifications(hashes[height - 1]).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(hash).unwrap() != + if net.peer(1).client().justifications(hashes[height - 1]).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -310,8 +310,8 @@ fn sync_justifications_across_forks() { // we push 5 blocks net.peer(0).push_blocks(5, false); // and then two forks 5 and 6 blocks long - let f1_best = 
net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); - let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); + let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false).pop().unwrap(); + let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false).pop().unwrap(); // peer 1 will only see the longer fork. but we'll request justifications // for both and finalize the small fork instead. @@ -370,8 +370,8 @@ fn syncs_all_forks() { net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); - let b1 = net.peer(0).push_blocks(2, true); - let b2 = net.peer(1).push_blocks(4, false); + let b1 = net.peer(0).push_blocks(2, true).pop().unwrap(); + let b2 = net.peer(1).push_blocks(4, false).pop().unwrap(); runtime.block_on(net.wait_until_sync()); // Check that all peers have all of the branches. @@ -452,7 +452,7 @@ fn can_sync_small_non_best_forks() { })); runtime.block_on(net.wait_until_sync()); - let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); + let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true).pop().unwrap(); net.peer(0).announce_block(another_fork, None); runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -473,12 +473,16 @@ fn can_sync_forks_ahead_of_the_best_chain() { runtime.block_on(net.wait_until_connected()); // Peer 0 is on 2-block fork which is announced with is_best=false - let fork_hash = net.peer(0).generate_blocks_with_fork_choice( - 2, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let fork_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 2, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); // Peer 1 is on 1-block fork net.peer(1).push_blocks(1, false); assert!(net.peer(0).client().header(&BlockId::Hash(fork_hash)).unwrap().is_some()); @@ -581,8 +585,8 @@ fn 
does_not_sync_announced_old_best_block() { let runtime = Runtime::new().unwrap(); let mut net = TestNet::new(runtime.handle().clone(), 3); - let old_hash = net.peer(0).push_blocks(1, false); - let old_hash_with_parent = net.peer(0).push_blocks(1, false); + let old_hash = net.peer(0).push_blocks(1, false).pop().unwrap(); + let old_hash_with_parent = net.peer(0).push_blocks(1, false).pop().unwrap(); net.peer(0).push_blocks(18, true); net.peer(1).push_blocks(20, true); @@ -651,12 +655,12 @@ fn imports_stale_once() { runtime.block_on(net.wait_until_sync()); // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); + let new_hash = net.peer(0).push_blocks(1, false).pop().unwrap(); import_with_announce(&runtime, &mut net, new_hash); assert_eq!(net.peer(1).num_downloaded_blocks(), 1); // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true).pop().unwrap(); import_with_announce(&runtime, &mut net, known_stale_hash); assert_eq!(net.peer(1).num_downloaded_blocks(), 2); } @@ -669,7 +673,7 @@ fn can_sync_to_peers_with_wrong_common_block() { net.peer(0).push_blocks(2, true); net.peer(1).push_blocks(2, true); - let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); + let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false).pop().unwrap(); net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); // wait for connection runtime.block_on(net.wait_until_connected()); @@ -678,7 +682,7 @@ fn can_sync_to_peers_with_wrong_common_block() { let just = Some((*b"FRNK", Vec::new())); net.peer(0).client().finalize_block(fork_hash, just.clone(), true).unwrap(); net.peer(1).client().finalize_block(fork_hash, just, true).unwrap(); - let final_hash = net.peer(0).push_blocks(1, false); + let final_hash = net.peer(0).push_blocks(1, 
false).pop().unwrap(); runtime.block_on(net.wait_until_sync()); @@ -737,12 +741,16 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { runtime.block_on(net.wait_until_connected()); // Add blocks but don't set them as best - let block_hash = net.peer(0).generate_blocks_with_fork_choice( - 1, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let block_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 1, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); while !net.peer(2).has_block(block_hash) { runtime.block_on(net.wait_until_idle()); @@ -781,12 +789,16 @@ fn wait_until_deferred_block_announce_validation_is_ready() { runtime.block_on(net.wait_until_connected()); // Add blocks but don't set them as best - let block_hash = net.peer(0).generate_blocks_with_fork_choice( - 1, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let block_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 1, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); while !net.peer(1).has_block(block_hash) { runtime.block_on(net.wait_until_idle()); @@ -802,9 +814,11 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { let mut net = TestNet::new(runtime.handle().clone(), 1); // Produce some blocks - let block_hash = - net.peer(0) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true, true); + let block_hash = net + .peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true, true) + .pop() + .unwrap(); // Add a node and wait until they are connected runtime.block_on(async { @@ -848,9 +862,11 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() { let mut net = TestNet::new(runtime.handle().clone(), 3); - let block_hash = - 
net.peer(0) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false, false); + let block_hash = net + .peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false, false) + .pop() + .unwrap(); net.peer(1) .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false, false); @@ -924,7 +940,11 @@ fn block_announce_data_is_propagated() { } })); - let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); + let block_hash = net + .peer(0) + .push_blocks_at_without_announcing(BlockId::Number(0), 1, true) + .pop() + .unwrap(); net.peer(0).announce_block(block_hash, Some(vec![137])); while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) { @@ -971,7 +991,7 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() { net.wait_until_idle().await; }); - let block_hash = net.peer(0).push_blocks(500, true); + let block_hash = net.peer(0).push_blocks(500, true).pop().unwrap(); runtime.block_on(net.wait_until_sync()); assert!(net.peer(1).has_block(block_hash)); @@ -986,10 +1006,10 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { sp_tracing::try_init_simple(); let runtime = Runtime::new().unwrap(); let mut net = JustificationTestNet::new(runtime.handle().clone(), 2); - net.peer(0).push_blocks(10, false); + let hashes = net.peer(0).push_blocks(10, false); runtime.block_on(net.wait_until_sync()); - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); + let hashof10 = hashes[9]; // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -1008,14 +1028,8 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { assert_eq!(1, net.peer(0).num_peers()); } - let hashof10 = net - .peer(0) - .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(10)) - .unwrap(); - // Finalize the 
block and make the justification available. + let hashof10 = hashes[9]; + // Finalize the 10th block and make the justification available. net.peer(0) .client() .finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true) @@ -1046,7 +1060,7 @@ fn syncs_all_forks_from_single_peer() { runtime.block_on(net.wait_until_connected()); // Peer 0 produces new blocks and announces. - let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true); + let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true).pop().unwrap(); // Wait till peer 1 starts downloading runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -1058,7 +1072,7 @@ fn syncs_all_forks_from_single_peer() { })); // Peer 0 produces and announces another fork - let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false); + let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false).pop().unwrap(); runtime.block_on(net.wait_until_sync()); @@ -1086,7 +1100,7 @@ fn syncs_after_missing_announcement() { // Peer 0 produces a new block and announces. Peer 1 ignores announcement. net.peer(0).push_blocks_at(BlockId::Number(10), 1, false); // Peer 0 produces another block and announces. - let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false); + let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false).pop().unwrap(); net.peer(1).push_blocks_at(BlockId::Number(10), 1, true); runtime.block_on(net.wait_until_sync()); assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); @@ -1136,19 +1150,13 @@ fn syncs_state() { config_two.sync_mode = SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; net.add_full_peer_with_config(config_two); - net.peer(0).push_blocks(64, false); + let hashes = net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. 
runtime.block_on(net.wait_until_sync()); assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); let just = (*b"FRNK", Vec::new()); - let hashof60 = net - .peer(0) - .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(60)) - .unwrap(); + let hashof60 = hashes[59]; net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap(); // Wait for state sync. runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -1239,8 +1247,8 @@ fn warp_sync() { sync_mode: SyncMode::Warp, ..Default::default() }); - let gap_end = net.peer(0).push_blocks(63, false); - let target = net.peer(0).push_blocks(1, false); + let gap_end = net.peer(0).push_blocks(63, false).pop().unwrap(); + let target = net.peer(0).push_blocks(1, false).pop().unwrap(); net.peer(1).push_blocks(64, false); net.peer(2).push_blocks(64, false); // Wait for peer 1 to sync state. From dba00e18560a464e5a85b9b998840928c6ea3306 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 15 Dec 2022 17:37:02 +0100 Subject: [PATCH 2/3] fix --- client/network/test/src/sync.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index a1e6a6609de7e..7c518e715bef8 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -258,7 +258,6 @@ fn sync_justifications() { let hashes = net.peer(0).push_blocks(20, false); runtime.block_on(net.wait_until_sync()); - let backend = net.peer(0).client().as_backend(); let hashof10 = hashes[9]; let hashof15 = hashes[14]; let hashof20 = hashes[19]; From 875668546f9bbdb92677ddbb0e882e7976bbf9d4 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 19 Dec 2022 09:40:02 +0100 Subject: [PATCH 3/3] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 
Bastian Köcher --- client/beefy/src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 90d047a9ff014..d593fe57c760b 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -171,7 +171,7 @@ impl BeefyTestNet { // push genesis to make indexing human readable (index equals to block number) all_hashes.push(self.peer(0).client().info().genesis_hash); - let mut built_hashes = self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { + let built_hashes = self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; if include_mmr_digest { @@ -189,7 +189,7 @@ impl BeefyTestNet { }); runtime.block_on(self.wait_until_sync()); - all_hashes.append(&mut built_hashes); + all_hashes.extend(built_hashes); all_hashes } }