diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 28a322834271c..7d59e94a44a09 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -34,7 +34,7 @@ use std::borrow::Cow; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; -use sc_client_api::{backend::Backend, HeaderBackend}; +use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; @@ -127,15 +127,10 @@ impl core::Benchmark for ImportBenchmark { context.import_block(self.block.clone()); let elapsed = start.elapsed(); - let hash = context - .client - .expect_block_hash_from_id(&BlockId::number(1)) - .expect("Block 1 was imported; qed"); - // Sanity checks. context .client - .state_at(hash) + .state_at(self.block.header.hash()) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index d2befc979b737..055c2669eb415 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -139,14 +139,26 @@ impl BeefyTestNet { }); } + /// Builds the blocks and returns the vector of built block hashes. 
+ /// Returned vector contains the genesis hash which allows for easy indexing (block number is + /// equal to index) pub(crate) async fn generate_blocks_and_sync( &mut self, count: usize, session_length: u64, validator_set: &BeefyValidatorSet, include_mmr_digest: bool, - ) { - self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { + ) -> Vec { + let mut all_hashes = Vec::with_capacity(count + 1); + + // make sure genesis is the only block in the network, so we can insert genesis at the beginning + // of hashes, otherwise indexing would be broken + assert!(self.peer(0).client().as_backend().blockchain().hash(1).unwrap().is_none()); + + // push genesis to make indexing human readable (index equals block number) + all_hashes.push(self.peer(0).client().info().genesis_hash); + + let built_hashes = self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; if include_mmr_digest { @@ -162,7 +174,10 @@ impl BeefyTestNet { block }); + all_hashes.extend(built_hashes); self.run_until_sync().await; + + all_hashes } } @@ -495,7 +510,7 @@ async fn finalize_block_and_wait_for_beefy( net: &Arc>, // peer index and key peers: impl Iterator + Clone, - finalize_targets: &[u64], + finalize_targets: &[H256], expected_beefy: &[u64], ) { let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); @@ -503,8 +518,7 @@ async fn finalize_block_and_wait_for_beefy( for block in finalize_targets { peers.clone().for_each(|(index, _)| { let client = net.lock().peer(index).client().as_client(); - let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); - client.finalize_block(finalize, None).unwrap(); + client.finalize_block(*block, None).unwrap(); }) } @@ -536,7 +550,7 @@ async fn beefy_finalizing_blocks() { tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 42 blocks including `AuthorityChange` digests every 10 blocks. 
- net.generate_blocks_and_sync(42, session_len, &validator_set, true).await; + let hashes = net.generate_blocks_and_sync(42, session_len, &validator_set, true).await; let net = Arc::new(Mutex::new(net)); @@ -544,19 +558,19 @@ async fn beefy_finalizing_blocks() { let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[1, 5], &[1, 5]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[1], hashes[5]], &[1, 5]).await; // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[10], &[10]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[10]], &[10]).await; // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[18], &[14, 18]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[18]], &[14, 18]).await; // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[20], &[20]).await; + finalize_block_and_wait_for_beefy(&net, peers.clone(), &[hashes[20]], &[20]).await; // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 - finalize_block_and_wait_for_beefy(&net, peers, &[21], &[]).await; + finalize_block_and_wait_for_beefy(&net, peers, &[hashes[21]], &[]).await; } #[tokio::test] @@ -574,23 +588,23 @@ async fn lagging_validators() { tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 62 blocks including `AuthorityChange` digests every 30 blocks. 
- net.generate_blocks_and_sync(62, session_len, &validator_set, true).await; + let hashes = net.generate_blocks_and_sync(62, session_len, &validator_set, true).await; let net = Arc::new(Mutex::new(net)); let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[1, 15], &[1, 9, 13, 14, 15]).await; + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &[hashes[1], hashes[15]], + &[1, 9, 13, 14, 15], + ) + .await; // Alice finalizes #25, Bob lags behind - let finalize = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(25)) - .unwrap(); + let finalize = hashes[25]; let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -606,7 +620,13 @@ async fn lagging_validators() { wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &[23, 24, 25]).await; // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy(&net, peers.clone(), &[30, 32], &[30, 31, 32]).await; + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &[hashes[30], hashes[32]], + &[30, 31, 32], + ) + .await; // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session @@ -614,13 +634,7 @@ async fn lagging_validators() { // Alice finalizes session-boundary mandatory block #60, Bob lags behind let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - let finalize = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(60)) - .unwrap(); + let 
finalize = hashes[60]; net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); @@ -661,24 +675,18 @@ async fn correct_beefy_payload() { tokio::spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); // push 12 blocks - net.generate_blocks_and_sync(12, session_len, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(12, session_len, &validator_set, false).await; let net = Arc::new(Mutex::new(net)); let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. - finalize_block_and_wait_for_beefy(&net, peers, &[1, 10], &[1, 9]).await; + finalize_block_and_wait_for_beefy(&net, peers, &[hashes[1], hashes[10]], &[1, 9]).await; let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting - let hashof11 = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(11)) - .unwrap(); + let hashof11 = hashes[11]; net.lock().peer(0).client().as_client().finalize_block(hashof11, None).unwrap(); net.lock().peer(1).client().as_client().finalize_block(hashof11, None).unwrap(); net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); @@ -863,7 +871,7 @@ async fn voter_initialization() { tokio::spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 26 blocks - net.generate_blocks_and_sync(26, session_len, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(26, session_len, &validator_set, false).await; let net = Arc::new(Mutex::new(net)); // Finalize multiple blocks at once to get a burst of finality notifications right from start. 
@@ -872,7 +880,7 @@ async fn voter_initialization() { finalize_block_and_wait_for_beefy( &net, peers.into_iter().enumerate(), - &[1, 6, 10, 17, 24, 26], + &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], &[1, 5, 10, 15, 20, 25], ) .await; @@ -905,7 +913,7 @@ async fn on_demand_beefy_justification_sync() { let dave_index = 3; // push 30 blocks - net.generate_blocks_and_sync(30, session_len, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(30, session_len, &validator_set, false).await; let fast_peers = fast_peers.into_iter().enumerate(); let net = Arc::new(Mutex::new(net)); @@ -914,7 +922,7 @@ async fn on_demand_beefy_justification_sync() { finalize_block_and_wait_for_beefy( &net, fast_peers.clone(), - &[1, 6, 10, 17, 24], + &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24]], &[1, 5, 10, 15, 20], ) .await; @@ -927,14 +935,13 @@ async fn on_demand_beefy_justification_sync() { let (dave_best_blocks, _) = get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); let client = net.lock().peer(dave_index).client().as_client(); - let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); - client.finalize_block(hashof1, None).unwrap(); + client.finalize_block(hashes[1], None).unwrap(); // Give Dave task some cpu cycles to process the finality notification, run_for(Duration::from_millis(100), &net).await; // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. // Have the other peers do some gossip so Dave finds out about their progress. - finalize_block_and_wait_for_beefy(&net, fast_peers, &[25], &[25]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers, &[hashes[25]], &[25]).await; // Now verify Dave successfully finalized #1 (through on-demand justification request). 
wait_for_best_beefy_blocks(dave_best_blocks, &net, &[1]).await; @@ -945,14 +952,14 @@ async fn on_demand_beefy_justification_sync() { finalize_block_and_wait_for_beefy( &net, [(dave_index, BeefyKeyring::Dave)].into_iter(), - &[6, 10, 17, 24, 26], + &[hashes[6], hashes[10], hashes[17], hashes[24], hashes[26]], &[5, 10, 15, 20, 25], ) .await; let all_peers = all_peers.into_iter().enumerate(); // Now that Dave has caught up, sanity check voting works for all of them. - finalize_block_and_wait_for_beefy(&net, all_peers, &[30], &[30]).await; + finalize_block_and_wait_for_beefy(&net, all_peers, &[hashes[30]], &[30]).await; } #[tokio::test] @@ -963,13 +970,12 @@ async fn should_initialize_voter_at_genesis() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false).await; let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications - let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // load persistent state - nothing in DB, should init at session boundary let persisted_state = voter_init_setup(&mut net, &mut finality).unwrap(); @@ -1008,13 +1014,12 @@ async fn should_initialize_voter_when_last_final_is_session_boundary() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false).await; let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications - 
let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // import/append BEEFY justification for session boundary block 10 let commitment = Commitment { @@ -1026,9 +1031,8 @@ async fn should_initialize_voter_when_last_final_is_session_boundary() { commitment, signatures: vec![None], }); - let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); backend - .append_justification(hashof10, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashes[10], (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // Test corner-case where session boundary == last beefy finalized, @@ -1068,13 +1072,12 @@ async fn should_initialize_voter_at_latest_finalized() { let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false).await; let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); // finalize 13 without justifications - let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); // import/append BEEFY justification for block 12 let commitment = Commitment { @@ -1086,9 +1089,8 @@ async fn should_initialize_voter_at_latest_finalized() { commitment, signatures: vec![None], }); - let hashof12 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); backend - .append_justification(hashof12, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashes[12], (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // Test 
initialization at last BEEFY finalized. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index df465b2dbf52c..89d231a6c2d73 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1382,10 +1382,10 @@ pub(crate) mod tests { // generate 2 blocks, try again expect success let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - net.peer(0).push_blocks(2, false); - // finalize 1 and 2 without justifications - let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); - let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); + let hashes = net.peer(0).push_blocks(2, false); + // finalize 1 and 2 without justifications (hashes does not contain genesis) + let hashof1 = hashes[0]; + let hashof2 = hashes[1]; backend.finalize_block(hashof1, None).unwrap(); backend.finalize_block(hashof2, None).unwrap(); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 97c0c9fe7ce49..d5bc7d50c5fa8 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -627,14 +627,18 @@ async fn sync_justifications_on_change_blocks() { net.peer(0).push_blocks(20, false); // at block 21 we do add a transition which is instant - let hashof21 = net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change( - &mut block, - ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, - ); - block - }); + let hashof21 = net + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + block + }) + .pop() + .unwrap(); // add more blocks on top of it (until we have 25) net.peer(0).push_blocks(4, 
false); @@ -1352,7 +1356,7 @@ async fn grandpa_environment_respects_voting_rules() { let link = peer.data.lock().take().unwrap(); // add 21 blocks - peer.push_blocks(21, false); + let hashes = peer.push_blocks(21, false); // create an environment with no voting rule restrictions let unrestricted_env = test_environment(&link, None, network_service.clone(), ()); @@ -1408,12 +1412,7 @@ async fn grandpa_environment_respects_voting_rules() { ); // we finalize block 19 with block 21 being the best block - let hashof19 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(19)) - .unwrap(); - peer.client().finalize_block(hashof19, None, false).unwrap(); + peer.client().finalize_block(hashes[18], None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1439,11 +1438,7 @@ async fn grandpa_environment_respects_voting_rules() { ); // we finalize block 21 with block 21 being the best block - let hashof21 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(21)) - .unwrap(); + let hashof21 = hashes[20]; peer.client().finalize_block(hashof21, None, false).unwrap(); // even though the default environment will always try to not vote on the @@ -1757,20 +1752,23 @@ async fn revert_prunes_authority_changes() { // Fork before revert point // add more blocks on top of block 23 (until we have 26) - let hash = peer.generate_blocks_at( - BlockId::Number(23), - 3, - BlockOrigin::File, - |builder| { - let mut block = builder.build().unwrap().block; - block.header.digest_mut().push(DigestItem::Other(vec![1])); - block - }, - false, - false, - true, - ForkChoiceStrategy::LongestChain, - ); + let hash = peer + .generate_blocks_at( + BlockId::Number(23), + 3, + BlockOrigin::File, + |builder| { + let mut block = builder.build().unwrap().block; + block.header.digest_mut().push(DigestItem::Other(vec![1])); + block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); 
// at block 27 of the fork add an authority transition peer.generate_blocks_at( BlockId::Hash(hash), @@ -1786,20 +1784,23 @@ async fn revert_prunes_authority_changes() { // Fork after revert point // add more block on top of block 25 (until we have 28) - let hash = peer.generate_blocks_at( - BlockId::Number(25), - 3, - BlockOrigin::File, - |builder| { - let mut block = builder.build().unwrap().block; - block.header.digest_mut().push(DigestItem::Other(vec![2])); - block - }, - false, - false, - true, - ForkChoiceStrategy::LongestChain, - ); + let hash = peer + .generate_blocks_at( + BlockId::Number(25), + 3, + BlockOrigin::File, + |builder| { + let mut block = builder.build().unwrap().block; + block.header.digest_mut().push(DigestItem::Other(vec![2])); + block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); // at block 29 of the fork add an authority transition peer.generate_blocks_at( BlockId::Hash(hash), diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index c3304df629e4e..a7b963555cae4 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -297,7 +297,12 @@ where } /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + pub fn generate_blocks( + &mut self, + count: usize, + origin: BlockOrigin, + edit_block: F, + ) -> Vec where F: FnMut( BlockBuilder, @@ -323,7 +328,7 @@ where origin: BlockOrigin, edit_block: F, fork_choice: ForkChoiceStrategy, - ) -> H256 + ) -> Vec where F: FnMut( BlockBuilder, @@ -354,12 +359,13 @@ where inform_sync_about_new_best_block: bool, announce_block: bool, fork_choice: ForkChoiceStrategy, - ) -> H256 + ) -> Vec where F: FnMut( BlockBuilder, ) -> Block, { + let mut hashes = Vec::with_capacity(count); let full_client = self.client.as_client(); let mut at = full_client.header(&at).unwrap().unwrap().hash(); for _ in 0..count { @@ -391,6 
+397,7 @@ where if announce_block { self.network.service().announce_block(hash, None); } + hashes.push(hash); at = hash; } @@ -400,24 +407,24 @@ where *full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number(), ); } - at + hashes } /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 { + pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> Vec { let best_hash = self.client.info().best_hash; self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) } /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_headers(&mut self, count: usize) -> H256 { + pub fn push_headers(&mut self, count: usize) -> Vec { let best_hash = self.client.info().best_hash; self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true, true, true) } /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. - pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { + pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, true, true) } @@ -429,7 +436,7 @@ where count: usize, with_tx: bool, announce_block: bool, - ) -> H256 { + ) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, false, announce_block) } @@ -440,7 +447,7 @@ where at: BlockId, count: usize, with_tx: bool, - ) -> H256 { + ) -> Vec { self.generate_tx_blocks_at(at, count, with_tx, false, true, false) } @@ -454,7 +461,7 @@ where headers_only: bool, inform_sync_about_new_best_block: bool, announce_block: bool, - ) -> H256 { + ) -> Vec { let mut nonce = 0; if with_tx { self.generate_blocks_at( @@ -491,7 +498,10 @@ where } } - pub fn push_authorities_change_block(&mut self, new_authorities: Vec) -> H256 { + pub fn push_authorities_change_block( + &mut self, + new_authorities: Vec, + ) -> Vec { self.generate_blocks(1, BlockOrigin::File, |mut 
builder| { builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); builder.build().unwrap().block diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 9a9b5782dc2a7..c7b651f457a56 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -250,13 +250,12 @@ async fn sync_no_common_longer_chain_fails() { async fn sync_justifications() { sp_tracing::try_init_simple(); let mut net = JustificationTestNet::new(3); - net.peer(0).push_blocks(20, false); + let hashes = net.peer(0).push_blocks(20, false); net.run_until_sync().await; - let backend = net.peer(0).client().as_backend(); - let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); - let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); - let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); + let hashof10 = hashes[9]; + let hashof15 = hashes[14]; + let hashof20 = hashes[19]; // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -280,13 +279,13 @@ async fn sync_justifications() { futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(hash).unwrap() != + for height in (10..21).step_by(5) { + if net.peer(0).client().justifications(hashes[height - 1]).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(hash).unwrap() != + if net.peer(1).client().justifications(hashes[height - 1]).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -305,8 +304,8 @@ async fn sync_justifications_across_forks() { // we push 5 blocks net.peer(0).push_blocks(5, false); // and then two forks 5 and 6 blocks long - let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, 
false); - let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); + let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false).pop().unwrap(); + let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false).pop().unwrap(); // peer 1 will only see the longer fork. but we'll request justifications // for both and finalize the small fork instead. @@ -364,8 +363,8 @@ async fn syncs_all_forks() { net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); - let b1 = net.peer(0).push_blocks(2, true); - let b2 = net.peer(1).push_blocks(4, false); + let b1 = net.peer(0).push_blocks(2, true).pop().unwrap(); + let b2 = net.peer(1).push_blocks(4, false).pop().unwrap(); net.run_until_sync().await; // Check that all peers have all of the branches. @@ -446,7 +445,7 @@ async fn can_sync_small_non_best_forks() { .await; net.run_until_sync().await; - let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); + let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true).pop().unwrap(); net.peer(0).announce_block(another_fork, None); futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -467,12 +466,16 @@ async fn can_sync_forks_ahead_of_the_best_chain() { net.run_until_connected().await; // Peer 0 is on 2-block fork which is announced with is_best=false - let fork_hash = net.peer(0).generate_blocks_with_fork_choice( - 2, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let fork_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 2, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); // Peer 1 is on 1-block fork net.peer(1).push_blocks(1, false); assert!(net.peer(0).client().header(&BlockId::Hash(fork_hash)).unwrap().is_some()); @@ -575,8 +578,8 @@ async fn does_not_sync_announced_old_best_block() { sp_tracing::try_init_simple(); let mut net = TestNet::new(3); 
- let old_hash = net.peer(0).push_blocks(1, false); - let old_hash_with_parent = net.peer(0).push_blocks(1, false); + let old_hash = net.peer(0).push_blocks(1, false).pop().unwrap(); + let old_hash_with_parent = net.peer(0).push_blocks(1, false).pop().unwrap(); net.peer(0).push_blocks(18, true); net.peer(1).push_blocks(20, true); @@ -647,12 +650,12 @@ async fn imports_stale_once() { net.run_until_sync().await; // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); + let new_hash = net.peer(0).push_blocks(1, false).pop().unwrap(); import_with_announce(&mut net, new_hash).await; assert_eq!(net.peer(1).num_downloaded_blocks(), 1); // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true).pop().unwrap(); import_with_announce(&mut net, known_stale_hash).await; assert_eq!(net.peer(1).num_downloaded_blocks(), 2); } @@ -664,7 +667,7 @@ async fn can_sync_to_peers_with_wrong_common_block() { net.peer(0).push_blocks(2, true); net.peer(1).push_blocks(2, true); - let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); + let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false).pop().unwrap(); net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); // wait for connection net.run_until_connected().await; @@ -673,7 +676,7 @@ async fn can_sync_to_peers_with_wrong_common_block() { let just = Some((*b"FRNK", Vec::new())); net.peer(0).client().finalize_block(fork_hash, just.clone(), true).unwrap(); net.peer(1).client().finalize_block(fork_hash, just, true).unwrap(); - let final_hash = net.peer(0).push_blocks(1, false); + let final_hash = net.peer(0).push_blocks(1, false).pop().unwrap(); net.run_until_sync().await; @@ -731,12 +734,16 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { 
net.run_until_connected().await; // Add blocks but don't set them as best - let block_hash = net.peer(0).generate_blocks_with_fork_choice( - 1, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let block_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 1, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); while !net.peer(2).has_block(block_hash) { net.run_until_idle().await; @@ -774,12 +781,16 @@ async fn wait_until_deferred_block_announce_validation_is_ready() { net.run_until_connected().await; // Add blocks but don't set them as best - let block_hash = net.peer(0).generate_blocks_with_fork_choice( - 1, - BlockOrigin::Own, - |builder| builder.build().unwrap().block, - ForkChoiceStrategy::Custom(false), - ); + let block_hash = net + .peer(0) + .generate_blocks_with_fork_choice( + 1, + BlockOrigin::Own, + |builder| builder.build().unwrap().block, + ForkChoiceStrategy::Custom(false), + ) + .pop() + .unwrap(); while !net.peer(1).has_block(block_hash) { net.run_until_idle().await; @@ -794,9 +805,11 @@ async fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() let mut net = TestNet::new(1); // Produce some blocks - let block_hash = - net.peer(0) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true, true); + let block_hash = net + .peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true, true) + .pop() + .unwrap(); // Add a node and wait until they are connected net.add_full_peer_with_config(Default::default()); @@ -838,9 +851,11 @@ async fn sync_to_tip_when_we_sync_together_with_multiple_peers() { let mut net = TestNet::new(3); - let block_hash = - net.peer(0) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false, false); + let block_hash = net + .peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false, false) + .pop() + 
.unwrap(); net.peer(1) .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false, false); @@ -912,7 +927,11 @@ async fn block_announce_data_is_propagated() { }) .await; - let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); + let block_hash = net + .peer(0) + .push_blocks_at_without_announcing(BlockId::Number(0), 1, true) + .pop() + .unwrap(); net.peer(0).announce_block(block_hash, Some(vec![137])); while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) { @@ -956,7 +975,7 @@ async fn continue_to_sync_after_some_block_announcement_verifications_failed() { net.run_until_connected().await; net.run_until_idle().await; - let block_hash = net.peer(0).push_blocks(500, true); + let block_hash = net.peer(0).push_blocks(500, true).pop().unwrap(); net.run_until_sync().await; assert!(net.peer(1).has_block(block_hash)); @@ -970,10 +989,10 @@ async fn continue_to_sync_after_some_block_announcement_verifications_failed() { async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { sp_tracing::try_init_simple(); let mut net = JustificationTestNet::new(2); - net.peer(0).push_blocks(10, false); + let hashes = net.peer(0).push_blocks(10, false); net.run_until_sync().await; - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); + let hashof10 = hashes[9]; // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -992,14 +1011,8 @@ async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { assert_eq!(1, net.peer(0).num_peers()); } - let hashof10 = net - .peer(0) - .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(10)) - .unwrap(); - // Finalize the block and make the justification available. + let hashof10 = hashes[9]; + // Finalize the 10th block and make the justification available. 
net.peer(0) .client() .finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true) @@ -1030,7 +1043,7 @@ async fn syncs_all_forks_from_single_peer() { net.run_until_connected().await; // Peer 0 produces new blocks and announces. - let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true); + let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true).pop().unwrap(); // Wait till peer 1 starts downloading futures::future::poll_fn::<(), _>(|cx| { @@ -1043,7 +1056,7 @@ async fn syncs_all_forks_from_single_peer() { .await; // Peer 0 produces and announces another fork - let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false); + let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false).pop().unwrap(); net.run_until_sync().await; @@ -1070,7 +1083,7 @@ async fn syncs_after_missing_announcement() { // Peer 0 produces a new block and announces. Peer 1 ignores announcement. net.peer(0).push_blocks_at(BlockId::Number(10), 1, false); // Peer 0 produces another block and announces. - let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false); + let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false).pop().unwrap(); net.peer(1).push_blocks_at(BlockId::Number(10), 1, true); net.run_until_sync().await; assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); @@ -1119,19 +1132,13 @@ async fn syncs_state() { config_two.sync_mode = SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; net.add_full_peer_with_config(config_two); - net.peer(0).push_blocks(64, false); + let hashes = net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. 
net.run_until_sync().await; assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); let just = (*b"FRNK", Vec::new()); - let hashof60 = net - .peer(0) - .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(60)) - .unwrap(); + let hashof60 = hashes[59]; net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap(); // Wait for state sync. futures::future::poll_fn::<(), _>(|cx| { @@ -1222,8 +1229,8 @@ async fn warp_sync() { sync_mode: SyncMode::Warp, ..Default::default() }); - let gap_end = net.peer(0).push_blocks(63, false); - let target = net.peer(0).push_blocks(1, false); + let gap_end = net.peer(0).push_blocks(63, false).pop().unwrap(); + let target = net.peer(0).push_blocks(1, false).pop().unwrap(); net.peer(1).push_blocks(64, false); net.peer(2).push_blocks(64, false); // Wait for peer 1 to sync state.