diff --git a/Cargo.lock b/Cargo.lock
index 38a401f11ac25..aaa4746d4ca42 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9898,16 +9898,19 @@ dependencies = [
name = "test-runner"
version = "0.9.0"
dependencies = [
- "env_logger 0.7.1",
"frame-system",
"futures 0.3.15",
"jsonrpc-core",
"log",
+ "num-traits",
"sc-basic-authorship",
"sc-cli",
"sc-client-api",
+ "sc-consensus",
+ "sc-consensus-babe",
"sc-consensus-manual-seal",
"sc-executor",
+ "sc-finality-grandpa",
"sc-informant",
"sc-network",
"sc-rpc",
@@ -9919,10 +9922,10 @@ dependencies = [
"sp-block-builder",
"sp-blockchain",
"sp-consensus",
+ "sp-consensus-babe",
"sp-core",
"sp-externalities",
"sp-inherents",
- "sp-io",
"sp-keyring",
"sp-keystore",
"sp-offchain",
@@ -9959,6 +9962,7 @@ dependencies = [
"sc-network",
"sc-service",
"sp-api",
+ "sp-consensus",
"sp-consensus-babe",
"sp-inherents",
"sp-keyring",
@@ -10116,7 +10120,6 @@ dependencies = [
"libc",
"memchr",
"mio",
- "mio-named-pipes",
"mio-uds",
"num_cpus",
"pin-project-lite 0.1.12",
diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml
index 5882a73982ecd..3435a34c45c1e 100644
--- a/bin/node/test-runner-example/Cargo.toml
+++ b/bin/node/test-runner-example/Cargo.toml
@@ -6,35 +6,36 @@ edition = "2018"
publish = false
[dependencies]
-test-runner = { path = "../../../test-utils/test-runner", version = "0.9.0" }
+test-runner = { path = "../../../test-utils/test-runner" }
-frame-system = { version = "4.0.0-dev", path = "../../../frame/system" }
-frame-support = { path = "../../../frame/support", version = "4.0.0-dev"}
-frame-benchmarking = { path = "../../../frame/benchmarking", version = "4.0.0-dev"}
-pallet-balances = { path = "../../../frame/balances", version = "4.0.0-dev"}
-pallet-sudo = { path = "../../../frame/sudo", version = "4.0.0-dev"}
-pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" }
+frame-system = { path = "../../../frame/system" }
+frame-support = { path = "../../../frame/support" }
+frame-benchmarking = { path = "../../../frame/benchmarking" }
+pallet-balances = { path = "../../../frame/balances" }
+pallet-sudo = { path = "../../../frame/sudo" }
+pallet-transaction-payment = { path = "../../../frame/transaction-payment" }
-node-runtime = { path = "../runtime", version = "3.0.0-dev"}
-node-primitives = { version = "2.0.0", path = "../primitives" }
-node-cli = { path = "../cli", version = "3.0.0-dev"}
+node-runtime = { path = "../runtime" }
+node-primitives = { path = "../primitives" }
+node-cli = { path = "../cli" }
-grandpa = { version = "0.10.0-dev", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" }
-sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" }
-sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" }
-sc-consensus-manual-seal = { version = "0.10.0-dev", path = "../../../client/consensus/manual-seal" }
-sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" }
-sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" }
-sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" }
-sc-network = { version = "0.10.0-dev", path = "../../../client/network" }
-sc-informant = { version = "0.10.0-dev", path = "../../../client/informant" }
-sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" }
+grandpa = { package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" }
+sp-consensus-babe = { path = "../../../primitives/consensus/babe" }
+sc-consensus-babe = { path = "../../../client/consensus/babe" }
+sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" }
+sc-service = { default-features = false, path = "../../../client/service" }
+sc-executor = { path = "../../../client/executor" }
+sc-client-api = { path = "../../../client/api" }
+sc-network = { path = "../../../client/network" }
+sc-informant = { path = "../../../client/informant" }
+sc-consensus = { path = "../../../client/consensus/common" }
-sp-runtime = { path = "../../../primitives/runtime", version = "4.0.0-dev"}
-sp-keyring = { version = "4.0.0-dev", path = "../../../primitives/keyring" }
-sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
-sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
-sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" }
-sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" }
+sp-runtime = { path = "../../../primitives/runtime" }
+sp-consensus = { path = "../../../primitives/consensus/common" }
+sp-keyring = { path = "../../../primitives/keyring" }
+sp-timestamp = { path = "../../../primitives/timestamp" }
+sp-api = { path = "../../../primitives/api" }
+sp-inherents = { path = "../../../primitives/inherents" }
+sp-keystore = { path = "../../../primitives/keystore" }
log = "0.4.14"
diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs
index 8a3f5560ec86e..513c8a7d8b5c8 100644
--- a/bin/node/test-runner-example/src/lib.rs
+++ b/bin/node/test-runner-example/src/lib.rs
@@ -15,23 +15,16 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
+#![deny(unused_extern_crates, missing_docs)]
//! Basic example of end to end runtime tests.
-use test_runner::{Node, ChainInfo, SignatureVerificationOverride, default_config};
+use test_runner::{ChainInfo, SignatureVerificationOverride};
use grandpa::GrandpaBlockImport;
-use sc_service::{TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, TaskExecutor};
-use std::sync::Arc;
-use sp_inherents::CreateInherentDataProviders;
+use sc_service::{TFullBackend, TFullClient};
use sc_consensus_babe::BabeBlockImport;
-use sp_keystore::SyncCryptoStorePtr;
-use sp_keyring::sr25519::Keyring::Alice;
-use sp_consensus_babe::AuthorityId;
-use sc_consensus_manual_seal::{
- ConsensusDataProvider, consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider},
-};
-use sp_runtime::{traits::IdentifyAccount, MultiSigner, generic::Era};
-use node_cli::chain_spec::development_config;
+use sc_consensus_manual_seal::consensus::babe::SlotTimestampProvider;
+use sp_runtime::generic::Era;
type BlockImport = BabeBlockImport>;
@@ -74,137 +67,39 @@ impl ChainInfo for NodeTemplateChainInfo {
pallet_transaction_payment::ChargeTransactionPayment::::from(0),
)
}
-
- fn config(task_executor: TaskExecutor) -> Configuration {
- default_config(task_executor, Box::new(development_config()))
- }
-
- fn create_client_parts(
- config: &Configuration,
- ) -> Result<
- (
- Arc>,
- Arc>,
- SyncCryptoStorePtr,
- TaskManager,
- Box>,
- Option<
- Box<
- dyn ConsensusDataProvider<
- Self::Block,
- Transaction = sp_api::TransactionFor<
- TFullClient,
- Self::Block,
- >,
- >,
- >,
- >,
- Self::SelectChain,
- Self::BlockImport,
- ),
- sc_service::Error,
- > {
- let (client, backend, keystore, task_manager) =
- new_full_parts::(config, None)?;
- let client = Arc::new(client);
-
- let select_chain = sc_consensus::LongestChain::new(backend.clone());
-
- let (grandpa_block_import, ..) =
- grandpa::block_import(
- client.clone(),
- &(client.clone() as Arc<_>),
- select_chain.clone(),
- None
- )?;
-
- let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?;
- let (block_import, babe_link) = sc_consensus_babe::block_import(
- slot_duration.clone(),
- grandpa_block_import,
- client.clone(),
- )?;
-
- let consensus_data_provider = BabeConsensusDataProvider::new(
- client.clone(),
- keystore.sync_keystore(),
- babe_link.epoch_changes().clone(),
- vec![(AuthorityId::from(Alice.public()), 1000)],
- )
- .expect("failed to create ConsensusDataProvider");
-
- Ok((
- client.clone(),
- backend,
- keystore.sync_keystore(),
- task_manager,
- Box::new(move |_, _| {
- let client = client.clone();
- async move {
- let timestamp = SlotTimestampProvider::new(client.clone()).map_err(|err| format!("{:?}", err))?;
- let babe = sp_consensus_babe::inherents::InherentDataProvider::new(timestamp.slot().into());
- Ok((timestamp, babe))
- }
- }),
- Some(Box::new(consensus_data_provider)),
- select_chain,
- block_import,
- ))
- }
-
- fn dispatch_with_root(call: ::Call, node: &mut Node) {
- let alice = MultiSigner::from(Alice.public()).into_account();
- let call = pallet_sudo::Call::sudo(Box::new(call));
- node.submit_extrinsic(call, alice);
- node.seal_blocks(1);
- }
}
#[cfg(test)]
mod tests {
use super::*;
- use test_runner::NodeConfig;
- use log::LevelFilter;
+ use test_runner::{Node, client_parts, ConfigOrChainSpec, build_runtime, task_executor};
+ use sp_keyring::sr25519::Keyring::Alice;
+ use node_cli::chain_spec::development_config;
+ use sp_runtime::{traits::IdentifyAccount, MultiSigner};
#[test]
fn test_runner() {
- let config = NodeConfig {
- log_targets: vec![
- ("yamux", LevelFilter::Off),
- ("multistream_select", LevelFilter::Off),
- ("libp2p", LevelFilter::Off),
- ("jsonrpc_client_transports", LevelFilter::Off),
- ("sc_network", LevelFilter::Off),
- ("tokio_reactor", LevelFilter::Off),
- ("parity-db", LevelFilter::Off),
- ("sub-libp2p", LevelFilter::Off),
- ("sync", LevelFilter::Off),
- ("peerset", LevelFilter::Off),
- ("ws", LevelFilter::Off),
- ("sc_network", LevelFilter::Off),
- ("sc_service", LevelFilter::Off),
- ("sc_basic_authorship", LevelFilter::Off),
- ("telemetry-logger", LevelFilter::Off),
- ("sc_peerset", LevelFilter::Off),
- ("rpc", LevelFilter::Off),
- ("runtime", LevelFilter::Trace),
- ("babe", LevelFilter::Debug)
- ],
- };
- let mut node = Node::::new(config).unwrap();
- // seals blocks
- node.seal_blocks(1);
- // submit extrinsics
- let alice = MultiSigner::from(Alice.public()).into_account();
- node.submit_extrinsic(frame_system::Call::remark((b"hello world").to_vec()), alice);
-
- // look ma, I can read state.
- let _events = node.with_state(|| frame_system::Pallet::::events());
- // get access to the underlying client.
- let _client = node.client();
+ let mut tokio_runtime = build_runtime().unwrap();
+ let task_executor = task_executor(tokio_runtime.handle().clone());
+ let (rpc, task_manager, client, pool, command_sink, backend) =
+ client_parts::(
+ ConfigOrChainSpec::ChainSpec(Box::new(development_config()), task_executor)
+ ).unwrap();
+ let node = Node::::new(rpc, task_manager, client, pool, command_sink, backend);
+
+ tokio_runtime.block_on(async {
+ // seals blocks
+ node.seal_blocks(1).await;
+ // submit extrinsics
+ let alice = MultiSigner::from(Alice.public()).into_account();
+ let _hash = node.submit_extrinsic(frame_system::Call::remark((b"hello world").to_vec()), alice)
+ .await
+ .unwrap();
+
+ // look ma, I can read state.
+ let _events = node.with_state(|| frame_system::Pallet::::events());
+ // get access to the underlying client.
+ let _client = node.client();
+ })
}
}
diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs
index b512588a204c8..947cdd5a21e50 100644
--- a/client/cli/src/runner.rs
+++ b/client/cli/src/runner.rs
@@ -160,22 +160,7 @@ impl Runner {
/// 2020-06-03 16:14:21 ⛓ Native runtime: node-251 (substrate-node-1.tx1.au10)
/// ```
fn print_node_infos(&self) {
- info!("{}", C::impl_name());
- info!("✌️ version {}", C::impl_version());
- info!(
- "❤️ by {}, {}-{}",
- C::author(),
- C::copyright_start_year(),
- Local::today().year(),
- );
- info!("📋 Chain specification: {}", self.config.chain_spec.name());
- info!("🏷 Node name: {}", self.config.network.node_name);
- info!("👤 Role: {}", self.config.display_role());
- info!("💾 Database: {} at {}",
- self.config.database,
- self.config.database.path().map_or_else(|| "".to_owned(), |p| p.display().to_string())
- );
- info!("⛓ Native runtime: {}", C::native_runtime_version(&self.config.chain_spec));
+ print_node_infos::(self.config())
}
/// A helper function that runs a node with tokio and stops if the process receives the signal
@@ -229,3 +214,24 @@ impl Runner {
&mut self.config
}
}
+
+/// Log information about the node itself.
+pub fn print_node_infos(config: &Configuration) {
+ info!("{}", C::impl_name());
+ info!("✌️ version {}", C::impl_version());
+ info!(
+ "❤️ by {}, {}-{}",
+ C::author(),
+ C::copyright_start_year(),
+ Local::today().year(),
+ );
+ info!("📋 Chain specification: {}", config.chain_spec.name());
+ info!("🏷 Node name: {}", config.network.node_name);
+ info!("👤 Role: {}", config.display_role());
+ info!("💾 Database: {} at {}",
+ config.database,
+ config.database.path().map_or_else(|| "".to_owned(), |p| p.display().to_string())
+ );
+ info!("⛓ Native runtime: {}", C::native_runtime_version(&config.chain_spec));
+}
+
diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs
index 100fec912faa8..fb2d47b48fed1 100644
--- a/client/consensus/manual-seal/src/consensus/babe.rs
+++ b/client/consensus/manual-seal/src/consensus/babe.rs
@@ -32,7 +32,7 @@ use sp_keystore::SyncCryptoStorePtr;
use sp_api::{ProvideRuntimeApi, TransactionFor};
use sp_blockchain::{HeaderBackend, HeaderMetadata};
-use sp_consensus::BlockImportParams;
+use sp_consensus::{BlockImportParams, BlockOrigin, ForkChoiceStrategy};
use sp_consensus_slots::Slot;
use sp_consensus_babe::{
BabeApi, inherents::BabeInherentData, ConsensusLog, BABE_ENGINE_ID, AuthorityId,
@@ -41,9 +41,10 @@ use sp_consensus_babe::{
use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier};
use sp_runtime::{
traits::{DigestItemFor, DigestFor, Block as BlockT, Zero, Header},
- generic::{Digest, BlockId},
+ generic::{Digest, BlockId}, Justifications,
};
use sp_timestamp::{InherentType, INHERENT_IDENTIFIER, TimestampInherentData};
+use sp_consensus::import_queue::{Verifier, CacheKeyId};
/// Provides BABE-compatible predigests and BlockImportParams.
/// Intended for use with BABE runtimes.
@@ -64,6 +65,74 @@ pub struct BabeConsensusDataProvider {
authorities: Vec<(AuthorityId, BabeAuthorityWeight)>,
}
+/// Verifier to be used for babe chains
+pub struct BabeVerifier {
+ /// Shared epoch changes
+ epoch_changes: SharedEpochChanges,
+
+ /// Shared reference to the client.
+ client: Arc,
+}
+
+impl BabeVerifier {
+	/// create a new verifier
+ pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier {
+ BabeVerifier {
+ epoch_changes,
+ client,
+ }
+ }
+}
+
+/// The verifier for the manual seal engine; instantly finalizes.
+#[async_trait::async_trait]
+impl Verifier for BabeVerifier
+ where
+ B: BlockT,
+ C: HeaderBackend + HeaderMetadata
+{
+ async fn verify(
+ &mut self,
+ origin: BlockOrigin,
+ header: B::Header,
+ justifications: Option,
+ body: Option>,
+ ) -> Result<(BlockImportParams, Option)>>), String> {
+ let mut import_params = BlockImportParams::new(origin, header.clone());
+ import_params.justifications = justifications;
+ import_params.body = body;
+ import_params.finalized = false;
+ import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
+
+ let pre_digest = find_pre_digest::(&header)?;
+
+ let parent_hash = header.parent_hash();
+ let parent = self.client.header(BlockId::Hash(*parent_hash))
+ .ok()
+ .flatten()
+ .ok_or_else(|| format!("header for block {} not found", parent_hash))?;
+ let epoch_changes = self.epoch_changes.shared_data();
+ let epoch_descriptor = epoch_changes
+ .epoch_descriptor_for_child_of(
+ descendent_query(&*self.client),
+ &parent.hash(),
+ parent.number().clone(),
+ pre_digest.slot(),
+ )
+ .map_err(|e| format!("failed to fetch epoch_descriptor: {}", e))?
+ .ok_or_else(|| format!("{:?}", sp_consensus::Error::InvalidAuthoritiesSet))?;
+ // drop the lock
+ drop(epoch_changes);
+
+ import_params.intermediates.insert(
+ Cow::from(INTERMEDIATE_KEY),
+ Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>,
+ );
+
+ Ok((import_params, None))
+ }
+}
+
impl BabeConsensusDataProvider
where
B: BlockT,
@@ -166,27 +235,32 @@ impl ConsensusDataProvider for BabeConsensusDataProvider
.map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))?
.ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
- let epoch_mut = match epoch_descriptor {
+ match epoch_descriptor {
ViableEpochDescriptor::Signaled(identifier, _epoch_header) => {
- epoch_changes.epoch_mut(&identifier)
- .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?
+ let epoch_mut = epoch_changes.epoch_mut(&identifier)
+ .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?;
+
+ // mutate the current epoch
+ epoch_mut.authorities = self.authorities.clone();
+
+ let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor {
+ authorities: self.authorities.clone(),
+ // copy the old randomness
+ randomness: epoch_mut.randomness.clone(),
+ });
+
+ vec![
+ DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()),
+ DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode())
+ ]
},
- _ => unreachable!("we couldn't claim a slot, so this isn't the genesis epoch; qed")
- };
-
- // mutate the current epoch
- epoch_mut.authorities = self.authorities.clone();
-
- let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor {
- authorities: self.authorities.clone(),
- // copy the old randomness
- randomness: epoch_mut.randomness.clone(),
- });
-
- vec![
- DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()),
- DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode())
- ]
+ ViableEpochDescriptor::UnimportedGenesis(_) => {
+ // since this is the genesis, secondary predigest works for now.
+ vec![
+ DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()),
+ ]
+ }
+ }
};
Ok(Digest { logs })
diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml
index a4c2bf84ab4a4..0eb02d941712f 100644
--- a/test-utils/test-runner/Cargo.toml
+++ b/test-utils/test-runner/Cargo.toml
@@ -7,46 +7,48 @@ publish = false
[dependencies]
# client deps
-sc-executor = { version = "0.10.0-dev", path = "../../client/executor" }
-sc-service = { version = "0.10.0-dev", path = "../../client/service" }
-sc-informant = { version = "0.10.0-dev", path = "../../client/informant" }
-sc-network = { version = "0.10.0-dev", path = "../../client/network" }
-sc-cli = { version = "0.10.0-dev", path = "../../client/cli" }
-sc-basic-authorship = { version = "0.10.0-dev", path = "../../client/basic-authorship" }
-sc-rpc = { version = "4.0.0-dev", path = "../../client/rpc" }
-sc-transaction-pool = { version = "4.0.0-dev", path = "../../client/transaction-pool" }
-sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../client/transaction-pool/api" }
-sc-client-api = { version = "4.0.0-dev", path = "../../client/api" }
-sc-rpc-server = { version = "4.0.0-dev", path = "../../client/rpc-servers" }
-manual-seal = { package = "sc-consensus-manual-seal", version = "0.10.0-dev", path = "../../client/consensus/manual-seal" }
+sc-executor = { path = "../../client/executor" }
+sc-service = { path = "../../client/service" }
+sc-informant = { path = "../../client/informant" }
+sc-network = { path = "../../client/network" }
+sc-cli = { path = "../../client/cli" }
+sc-basic-authorship = { path = "../../client/basic-authorship" }
+sc-rpc = { path = "../../client/rpc" }
+sc-transaction-pool = { path = "../../client/transaction-pool" }
+grandpa = { package = "sc-finality-grandpa", path = "../../client/finality-grandpa" }
+sp-consensus-babe = { path = "../../primitives/consensus/babe" }
+sc-consensus-babe = { path = "../../client/consensus/babe" }
+sc-consensus = { path = "../../client/consensus/common" }
+sc-transaction-pool-api = { path = "../../client/transaction-pool/api" }
+sc-client-api = { path = "../../client/api" }
+sc-rpc-server = { path = "../../client/rpc-servers" }
+manual-seal = { package = "sc-consensus-manual-seal", path = "../../client/consensus/manual-seal" }
# primitive deps
-sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
-sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
-sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" }
-sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
-sp-io = { version = "4.0.0-dev", path = "../../primitives/io" }
-sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" }
-sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" }
-sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" }
-sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
-sp-session = { version = "4.0.0-dev", path = "../../primitives/session" }
-sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" }
-sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" }
-sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" }
+sp-core = { path = "../../primitives/core" }
+sp-blockchain = { path = "../../primitives/blockchain" }
+sp-block-builder = { path = "../../primitives/block-builder" }
+sp-api = { path = "../../primitives/api" }
+sp-transaction-pool = { path = "../../primitives/transaction-pool" }
+sp-consensus = { path = "../../primitives/consensus/common" }
+sp-keystore = { path = "../../primitives/keystore" }
+sp-runtime = { path = "../../primitives/runtime" }
+sp-session = { path = "../../primitives/session" }
+sp-offchain = { path = "../../primitives/offchain" }
+sp-inherents = { path = "../../primitives/inherents" }
+sp-keyring = { path = "../../primitives/keyring" }
-sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" }
-sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" }
-sp-wasm-interface = { version = "4.0.0-dev", path = "../../primitives/wasm-interface" }
-sp-runtime-interface = { version = "4.0.0-dev", path = "../../primitives/runtime-interface" }
+sp-externalities = { path = "../../primitives/externalities" }
+sp-state-machine = { path = "../../primitives/state-machine" }
+sp-wasm-interface = { path = "../../primitives/wasm-interface" }
+sp-runtime-interface = { path = "../../primitives/runtime-interface" }
# pallets
-frame-system = { version = "4.0.0-dev", path = "../../frame/system" }
+frame-system = { path = "../../frame/system" }
-env_logger = "0.7.1"
log = "0.4.8"
futures = { package = "futures", version = "0.3", features = ["compat"] }
-tokio = { version = "0.2", features = ["full"] }
-
+tokio = { version = "0.2", features = ["signal"] }
# Calling RPC
jsonrpc-core = "15.1"
+num-traits = "0.2.14"
diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs
new file mode 100644
index 0000000000000..4c562fbc66eda
--- /dev/null
+++ b/test-utils/test-runner/src/client.rs
@@ -0,0 +1,219 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+//! Client parts
+use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
+use sp_consensus_babe::BabeApi;
+use crate::{ChainInfo, default_config};
+use manual_seal::consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider};
+use sp_keyring::sr25519::Keyring::Alice;
+use std::str::FromStr;
+use sp_runtime::traits::Header;
+use futures::channel::mpsc;
+use jsonrpc_core::MetaIoHandler;
+use manual_seal::{run_manual_seal, EngineCommand, ManualSealParams, import_queue, rpc::{ManualSeal, ManualSealApi}};
+use sc_client_api::backend::Backend;
+use sc_service::{
+ build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams, TFullBackend,
+ TFullClient, TaskManager, new_full_parts, Configuration, ChainSpec, TaskExecutor,
+};
+use sc_transaction_pool::BasicPool;
+use sc_transaction_pool_api::TransactionPool;
+use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata};
+use sp_block_builder::BlockBuilder;
+use sp_runtime::traits::Block as BlockT;
+use sp_session::SessionKeys;
+use sp_offchain::OffchainWorkerApi;
+use std::sync::Arc;
+
+type ClientParts = (
+ Arc>,
+ TaskManager,
+ Arc::Block, ::RuntimeApi, ::Executor>>,
+ Arc::Block,
+ Hash = <::Block as BlockT>::Hash,
+ Error = sc_transaction_pool::error::Error,
+ InPoolTransaction = sc_transaction_pool::Transaction<
+ <::Block as BlockT>::Hash,
+ <::Block as BlockT>::Extrinsic,
+ >,
+ >>,
+ mpsc::Sender::Block as BlockT>::Hash>>,
+ Arc::Block>>,
+);
+
+/// Provide the config or chain spec for a given chain
+pub enum ConfigOrChainSpec {
+ /// Configuration object
+ Config(Configuration),
+ /// Chain spec object
+ ChainSpec(Box, TaskExecutor)
+}
+/// Creates all the client parts you need for [`Node`]
+pub fn client_parts(config_or_chain_spec: ConfigOrChainSpec) -> Result, sc_service::Error>
+ where
+ T: ChainInfo + 'static,
+ >>::RuntimeApi:
+ Core + Metadata + OffchainWorkerApi + SessionKeys
+ + TaggedTransactionQueue + BlockBuilder + BabeApi
+ + ApiExt as Backend>::State>,
+ ::Call: From>,
+ <::Block as BlockT>::Hash: FromStr,
+ <<::Block as BlockT>::Header as Header>::Number: num_traits::cast::AsPrimitive,
+{
+ use sp_consensus_babe::AuthorityId;
+ let config = match config_or_chain_spec {
+ ConfigOrChainSpec::Config(config) => config,
+ ConfigOrChainSpec::ChainSpec(chain_spec, task_executor) => {
+ default_config(task_executor, chain_spec)
+ },
+ };
+
+ let (client, backend, keystore, mut task_manager) =
+ new_full_parts::(&config, None)?;
+ let client = Arc::new(client);
+
+ let select_chain = sc_consensus::LongestChain::new(backend.clone());
+
+ let (grandpa_block_import, ..) =
+ grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), None)?;
+
+ let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?;
+ let (block_import, babe_link) = sc_consensus_babe::block_import(
+ slot_duration.clone(),
+ grandpa_block_import,
+ client.clone(),
+ )?;
+
+ let consensus_data_provider = BabeConsensusDataProvider::new(
+ client.clone(),
+ keystore.sync_keystore(),
+ babe_link.epoch_changes().clone(),
+ vec![(AuthorityId::from(Alice.public()), 1000)],
+ )
+ .expect("failed to create ConsensusDataProvider");
+
+ let import_queue =
+ import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None);
+
+ let transaction_pool = BasicPool::new_full(
+ config.transaction_pool.clone(),
+ true.into(),
+ config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
+
+ let (network, system_rpc_tx, network_starter) = {
+ let params = BuildNetworkParams {
+ config: &config,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ spawn_handle: task_manager.spawn_handle(),
+ import_queue,
+ on_demand: None,
+ block_announce_validator_builder: None,
+ };
+ build_network(params)?
+ };
+
+ // offchain workers
+ sc_service::build_offchain_workers(
+ &config,
+ task_manager.spawn_handle(),
+ client.clone(),
+ network.clone(),
+ );
+
+ // Proposer object for block authorship.
+ let env = sc_basic_authorship::ProposerFactory::new(
+ task_manager.spawn_handle(),
+ client.clone(),
+ transaction_pool.clone(),
+ config.prometheus_registry(),
+ None
+ );
+
+ // Channel for the rpc handler to communicate with the authorship task.
+ let (command_sink, commands_stream) = mpsc::channel(10);
+
+ let rpc_sink = command_sink.clone();
+
+ let rpc_handlers = {
+ let params = SpawnTasksParams {
+ config,
+ client: client.clone(),
+ backend: backend.clone(),
+ task_manager: &mut task_manager,
+ keystore: keystore.sync_keystore(),
+ on_demand: None,
+ transaction_pool: transaction_pool.clone(),
+ rpc_extensions_builder: Box::new(move |_, _| {
+ let mut io = jsonrpc_core::IoHandler::default();
+ io.extend_with(
+ ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone()))
+ );
+ io
+ }),
+ remote_blockchain: None,
+ network,
+ system_rpc_tx,
+ telemetry: None
+ };
+ spawn_tasks(params)?
+ };
+
+ let cloned_client = client.clone();
+ let create_inherent_data_providers = Box::new(move |_, _| {
+ let client = cloned_client.clone();
+ async move {
+ let timestamp = SlotTimestampProvider::new(client.clone()).map_err(|err| format!("{:?}", err))?;
+ let babe = sp_consensus_babe::inherents::InherentDataProvider::new(timestamp.slot().into());
+ Ok((timestamp, babe))
+ }
+ });
+
+ // Background authorship future.
+ let authorship_future = run_manual_seal(ManualSealParams {
+ block_import,
+ env,
+ client: client.clone(),
+ pool: transaction_pool.clone(),
+ commands_stream,
+ select_chain,
+ consensus_data_provider: Some(Box::new(consensus_data_provider)),
+ create_inherent_data_providers,
+ });
+
+ // spawn the authorship task as an essential task.
+ task_manager
+ .spawn_essential_handle()
+ .spawn("manual-seal", authorship_future);
+
+ network_starter.start_network();
+ let rpc_handler = rpc_handlers.io_handler();
+
+ Ok((
+ rpc_handler,
+ task_manager,
+ client,
+ transaction_pool,
+ command_sink,
+ backend,
+ ))
+}
\ No newline at end of file
diff --git a/test-utils/test-runner/src/host_functions.rs b/test-utils/test-runner/src/host_functions.rs
index ca8790683e6c4..534d4a23fdccb 100644
--- a/test-utils/test-runner/src/host_functions.rs
+++ b/test-utils/test-runner/src/host_functions.rs
@@ -16,6 +16,20 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
+/// Use this to override host functions.
+/// eg
+/// ```rust
+/// use test_runner::override_host_functions;
+/// pub struct SignatureVerificationOverride;
+///
+/// impl sp_wasm_interface::HostFunctions for SignatureVerificationOverride {
+/// fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> {
+/// override_host_functions!(
+/// "ext_crypto_ecdsa_verify_version_1", EcdsaVerify,
+/// )
+/// }
+/// }
+/// ```
#[macro_export]
macro_rules! override_host_functions {
($($fn_name:expr, $name:ident,)*) => {{
diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs
index 000d3efc3e96f..1976d132b7c50 100644
--- a/test-utils/test-runner/src/lib.rs
+++ b/test-utils/test-runner/src/lib.rs
@@ -15,6 +15,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
+#![deny(missing_docs, unused_extern_crates)]
//! Test runner
//! # Substrate Test Runner
@@ -226,16 +227,14 @@
//! }
//! ```
-use manual_seal::consensus::ConsensusDataProvider;
use sc_executor::NativeExecutionDispatch;
-use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager, TaskExecutor};
+use sc_service::TFullClient;
use sp_api::{ConstructRuntimeApi, TransactionFor};
use sp_consensus::{BlockImport, SelectChain};
-use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
-use sp_keystore::SyncCryptoStorePtr;
+use sp_inherents::InherentDataProvider;
use sp_runtime::traits::{Block as BlockT, SignedExtension};
-use std::sync::Arc;
+mod client;
mod node;
mod utils;
mod host_functions;
@@ -243,6 +242,7 @@ mod host_functions;
pub use host_functions::*;
pub use node::*;
pub use utils::*;
+pub use client::*;
/// Wrapper trait for concrete type required by this testing framework.
pub trait ChainInfo: Sized {
@@ -282,44 +282,4 @@ pub trait ChainInfo: Sized {
/// Signed extras, this function is caled in an externalities provided environment.
fn signed_extras(from: ::AccountId) -> Self::SignedExtras;
-
- /// config factory
- fn config(task_executor: TaskExecutor) -> Configuration;
-
- /// Attempt to create client parts, including block import,
- /// select chain strategy and consensus data provider.
- fn create_client_parts(
- config: &Configuration,
- ) -> Result<
- (
- Arc>,
- Arc>,
- SyncCryptoStorePtr,
- TaskManager,
- Box<
- dyn CreateInherentDataProviders<
- Self::Block,
- (),
- InherentDataProviders = Self::InherentDataProviders
- >
- >,
- Option<
- Box<
- dyn ConsensusDataProvider<
- Self::Block,
- Transaction = TransactionFor<
- TFullClient,
- Self::Block,
- >,
- >,
- >,
- >,
- Self::SelectChain,
- Self::BlockImport,
- ),
- sc_service::Error,
- >;
-
- /// Given a call and a handle to the node, execute the call with root privileges.
- fn dispatch_with_root(call: ::Call, node: &mut Node);
}
diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs
index 92fc3dbcda475..b1e5854798eec 100644
--- a/test-utils/test-runner/src/node.rs
+++ b/test-utils/test-runner/src/node.rs
@@ -20,31 +20,20 @@ use std::sync::Arc;
use futures::{FutureExt, SinkExt, channel::{mpsc, oneshot}};
use jsonrpc_core::MetaIoHandler;
-use manual_seal::{run_manual_seal, EngineCommand, ManualSealParams};
-use sc_cli::build_runtime;
-use sc_client_api::{
- backend::{self, Backend}, CallExecutor, ExecutorProvider,
-};
-use sc_service::{
- build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams,
- TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TaskType,
-};
-use sc_transaction_pool::BasicPool;
-use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata, OverlayedChanges, StorageTransactionCache};
-use sp_block_builder::BlockBuilder;
+use manual_seal::EngineCommand;
+use sc_client_api::{backend::{self, Backend}, CallExecutor, ExecutorProvider};
+use sc_service::{TFullBackend, TFullCallExecutor, TFullClient, TaskManager};
+use sp_api::{OverlayedChanges, StorageTransactionCache};
use sp_blockchain::HeaderBackend;
use sp_core::ExecutionContext;
-use sp_offchain::OffchainWorkerApi;
-use sp_runtime::traits::{Block as BlockT, Extrinsic};
-use sp_runtime::{generic::BlockId, transaction_validity::TransactionSource, MultiSignature, MultiAddress};
-use sp_runtime::{generic::UncheckedExtrinsic, traits::NumberFor};
-use sp_session::SessionKeys;
-use sp_state_machine::Ext;
-use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
+use sp_runtime::{
+ generic::{BlockId, UncheckedExtrinsic},
+ traits::{Block as BlockT, Header, Extrinsic, NumberFor},
+ transaction_validity::TransactionSource, MultiSignature, MultiAddress
+};
+use crate::ChainInfo;
use sc_transaction_pool_api::TransactionPool;
-
-use crate::{ChainInfo, utils::logger};
-use log::LevelFilter;
+use sp_state_machine::Ext;
/// This holds a reference to a running node on another thread,
/// the node process is dropped when this struct is dropped
@@ -52,26 +41,20 @@ use log::LevelFilter;
pub struct Node {
/// rpc handler for communicating with the node over rpc.
rpc_handler: Arc>,
- /// Stream of log lines
- log_stream: mpsc::UnboundedReceiver,
- /// node tokio runtime
- _runtime: tokio::runtime::Runtime,
/// handle to the running node.
- _task_manager: Option,
+ task_manager: Option,
/// client instance
client: Arc>,
/// transaction pool
- pool: Arc<
- dyn TransactionPool<
- Block = T::Block,
- Hash = ::Hash,
- Error = sc_transaction_pool::error::Error,
- InPoolTransaction = sc_transaction_pool::Transaction<
- ::Hash,
- ::Extrinsic,
- >,
+ pool: Arc::Block,
+ Hash = <::Block as BlockT>::Hash,
+ Error = sc_transaction_pool::error::Error,
+ InPoolTransaction = sc_transaction_pool::Transaction<
+ <::Block as BlockT>::Hash,
+ <::Block as BlockT>::Extrinsic,
>,
- >,
+ >>,
/// channel to communicate with manual seal on.
manual_seal_command_sink: mpsc::Sender::Hash>>,
/// backend type.
@@ -80,149 +63,48 @@ pub struct Node {
initial_block_number: NumberFor
}
-/// Configuration options for the node.
-pub struct NodeConfig {
- /// A set of log targets you'd like to enable/disbale
- pub log_targets: Vec<(&'static str, LevelFilter)>,
-}
-
type EventRecord = frame_system::EventRecord<::Event, ::Hash>;
-impl Node {
- /// Starts a node with the manual-seal authorship.
- pub fn new(node_config: NodeConfig) -> Result
+impl Node
where
- >>::RuntimeApi:
- Core
- + Metadata
- + OffchainWorkerApi
- + SessionKeys
- + TaggedTransactionQueue
- + BlockBuilder
- + ApiExt as Backend>::State>,
- {
- let NodeConfig { log_targets, } = node_config;
- let tokio_runtime = build_runtime().unwrap();
- let runtime_handle = tokio_runtime.handle().clone();
- let task_executor = move |fut, task_type| match task_type {
- TaskType::Async => runtime_handle.spawn(fut).map(drop),
- TaskType::Blocking => runtime_handle
- .spawn_blocking(move || futures::executor::block_on(fut))
- .map(drop),
- };
- // unbounded logs, should be fine, test is shortlived.
- let (log_sink, log_stream) = mpsc::unbounded();
-
- logger(log_targets, tokio_runtime.handle().clone(), log_sink);
- let config = T::config(task_executor.into());
-
- let (
- client,
- backend,
- keystore,
- mut task_manager,
- create_inherent_data_providers,
- consensus_data_provider,
- select_chain,
- block_import,
- ) = T::create_client_parts(&config)?;
-
- let import_queue =
- manual_seal::import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None);
-
- let transaction_pool = BasicPool::new_full(
- config.transaction_pool.clone(),
- true.into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let (network, system_rpc_tx, network_starter) = {
- let params = BuildNetworkParams {
- config: &config,
- client: client.clone(),
- transaction_pool: transaction_pool.clone(),
- spawn_handle: task_manager.spawn_handle(),
- import_queue,
- on_demand: None,
- block_announce_validator_builder: None,
- };
- build_network(params)?
- };
-
- sc_service::build_offchain_workers(
- &config,
- task_manager.spawn_handle(),
- client.clone(),
- network.clone(),
- );
-
- // Proposer object for block authorship.
- let env = sc_basic_authorship::ProposerFactory::new(
- task_manager.spawn_handle(),
- client.clone(),
- transaction_pool.clone(),
- config.prometheus_registry(),
- None
- );
-
- // Channel for the rpc handler to communicate with the authorship task.
- let (command_sink, commands_stream) = mpsc::channel(10);
-
- let rpc_handlers = {
- let params = SpawnTasksParams {
- config,
- client: client.clone(),
- backend: backend.clone(),
- task_manager: &mut task_manager,
- keystore,
- on_demand: None,
- transaction_pool: transaction_pool.clone(),
- rpc_extensions_builder: Box::new(move |_, _| jsonrpc_core::IoHandler::default()),
- remote_blockchain: None,
- network,
- system_rpc_tx,
- telemetry: None
- };
- spawn_tasks(params)?
- };
-
- // Background authorship future.
- let authorship_future = run_manual_seal(ManualSealParams {
- block_import,
- env,
- client: client.clone(),
- pool: transaction_pool.clone(),
- commands_stream,
- select_chain,
- consensus_data_provider,
- create_inherent_data_providers,
- });
-
- // spawn the authorship task as an essential task.
- task_manager
- .spawn_essential_handle()
- .spawn("manual-seal", authorship_future);
-
- network_starter.start_network();
- let rpc_handler = rpc_handlers.io_handler();
- let initial_number = client.info().best_number;
-
- Ok(Self {
+ T: ChainInfo,
+ <::Header as Header>::Number: From,
+{
+ /// Creates a new node.
+ pub fn new(
+ rpc_handler: Arc>,
+ task_manager: TaskManager,
+ client: Arc>,
+ pool: Arc::Block,
+ Hash = <::Block as BlockT>::Hash,
+ Error = sc_transaction_pool::error::Error,
+ InPoolTransaction = sc_transaction_pool::Transaction<
+ <::Block as BlockT>::Hash,
+ <::Block as BlockT>::Extrinsic,
+ >,
+ >>,
+ command_sink: mpsc::Sender::Hash>>,
+ backend: Arc>,
+ ) -> Self {
+ Self {
rpc_handler,
- _task_manager: Some(task_manager),
- _runtime: tokio_runtime,
- client,
- pool: transaction_pool,
+ task_manager: Some(task_manager),
+ client: client.clone(),
+ pool,
backend,
- log_stream,
manual_seal_command_sink: command_sink,
- initial_block_number: initial_number,
- })
+ initial_block_number: client.info().best_number,
+ }
}
- /// Returns a reference to the rpc handlers.
+ /// Returns a reference to the rpc handlers, use this to send rpc requests.
+ /// eg
+ /// ```ignore
+ /// let request = r#"{"jsonrpc":"2.0","method":"engine_createBlock","params": [true, true],"id":1}"#;
+ /// let response = node.rpc_handler()
+ /// .handle_request_sync(request, Default::default());
+ /// ```
pub fn rpc_handler(&self) -> Arc> {
self.rpc_handler.clone()
}
@@ -262,11 +144,11 @@ impl Node {
}
/// submit some extrinsic to the node, providing the sending account.
- pub fn submit_extrinsic(
- &mut self,
+ pub async fn submit_extrinsic(
+ &self,
call: impl Into<::Call>,
from: ::AccountId,
- ) -> ::Hash
+ ) -> Result<::Hash, sc_transaction_pool::error::Error>
where
::Extrinsic: From<
UncheckedExtrinsic<
@@ -294,11 +176,7 @@ impl Node {
.expect("UncheckedExtrinsic::new() always returns Some");
let at = self.client.info().best_hash;
- self._runtime
- .block_on(
- self.pool.submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()),
- )
- .unwrap()
+ self.pool.submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()).await
}
/// Get the events of the most recently produced block
@@ -306,24 +184,9 @@ impl Node {
self.with_state(|| frame_system::Pallet::::events())
}
- /// Checks the node logs for a specific entry.
- pub fn assert_log_line(&mut self, content: &str) {
- futures::executor::block_on(async {
- use futures::StreamExt;
-
- while let Some(log_line) = self.log_stream.next().await {
- if log_line.contains(content) {
- return;
- }
- }
-
- panic!("Could not find {} in logs content", content);
- });
- }
-
/// Instructs manual seal to seal new, possibly empty blocks.
- pub fn seal_blocks(&mut self, num: usize) {
- let (tokio, sink) = (&mut self._runtime, &mut self.manual_seal_command_sink);
+ pub async fn seal_blocks(&self, num: usize) {
+ let mut sink = self.manual_seal_command_sink.clone();
for count in 0..num {
let (sender, future_block) = oneshot::channel();
@@ -334,15 +197,13 @@ impl Node {
sender: Some(sender),
});
- tokio.block_on(async {
- const ERROR: &'static str = "manual-seal authorship task is shutting down";
- future.await.expect(ERROR);
+ const ERROR: &'static str = "manual-seal authorship task is shutting down";
+ future.await.expect(ERROR);
- match future_block.await.expect(ERROR) {
- Ok(block) => log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num),
- Err(err) => log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err),
- }
- });
+ match future_block.await.expect(ERROR) {
+ Ok(block) => log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num),
+ Err(err) => log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err),
+ }
}
}
@@ -351,32 +212,24 @@ impl Node {
self.backend.revert(count, true).expect("Failed to revert blocks: ");
}
- /// Revert all blocks added since creation of the node.
- pub fn clean(&self) {
- // if a db path was specified, revert all blocks we've added
- if let Some(_) = std::env::var("DB_BASE_PATH").ok() {
- let diff = self.client.info().best_number - self.initial_block_number;
- self.revert_blocks(diff);
+ /// so you've decided to run the test runner as a binary, use this to shutdown gracefully.
+ pub async fn until_shutdown(mut self) {
+ let manager = self.task_manager.take();
+ if let Some(mut task_manager) = manager {
+ let task = task_manager.future().fuse();
+ let signal = tokio::signal::ctrl_c();
+ futures::pin_mut!(signal);
+ futures::future::select(task, signal).await;
+ // we don't really care whichever comes first.
+ task_manager.clean_shutdown().await
}
}
-
- /// Performs a runtime upgrade given a wasm blob.
- pub fn upgrade_runtime(&mut self, wasm: Vec)
- where
- ::Call: From>
- {
- let call = frame_system::Call::set_code(wasm);
- T::dispatch_with_root(call.into(), self);
- }
}
impl Drop for Node {
fn drop(&mut self) {
- self.clean();
-
- if let Some(mut task_manager) = self._task_manager.take() {
- // if this isn't called the node will live forever
- task_manager.terminate()
- }
+ // Revert all blocks added since creation of the node.
+ let diff = self.client.info().best_number - self.initial_block_number;
+ self.revert_blocks(diff);
}
}
diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs
index fae527ededf97..9e722bcc510aa 100644
--- a/test-utils/test-runner/src/utils.rs
+++ b/test-utils/test-runner/src/utils.rs
@@ -16,17 +16,20 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see .
-use futures::{Sink, SinkExt};
-use std::fmt;
-use std::io::Write;
-use log::LevelFilter;
-use sc_service::{BasePath, ChainSpec, Configuration, TaskExecutor, DatabaseConfig, KeepBlocks, TransactionStorageMode};
+use sc_service::{
+ BasePath, ChainSpec, Configuration, TaskExecutor,
+ DatabaseConfig, KeepBlocks, TransactionStorageMode, TaskType,
+};
use sp_keyring::sr25519::Keyring::Alice;
use sc_network::{multiaddr, config::{NetworkConfiguration, TransportConfig, Role}};
use sc_informant::OutputFormat;
use sc_service::config::KeystoreConfig;
use sc_executor::WasmExecutionMethod;
use sc_client_api::execution_extensions::ExecutionStrategies;
+use tokio::runtime::Handle;
+use futures::FutureExt;
+
+pub use sc_cli::build_runtime;
/// Base db path gotten from env
pub fn base_path() -> BasePath {
@@ -37,35 +40,6 @@ pub fn base_path() -> BasePath {
}
}
-/// Builds the global logger.
-pub fn logger(
- log_targets: Vec<(&'static str, LevelFilter)>,
- executor: tokio::runtime::Handle,
- log_sink: S,
-)
-where
- S: Sink + Clone + Unpin + Send + Sync + 'static,
- S::Error: Send + Sync + fmt::Debug,
-{
- let mut builder = env_logger::builder();
- builder.format(move |buf: &mut env_logger::fmt::Formatter, record: &log::Record| {
- let entry = format!("{} {} {}", record.level(), record.target(), record.args());
- let res = writeln!(buf, "{}", entry);
-
- let mut log_sink_clone = log_sink.clone();
- let _ = executor.spawn(async move {
- log_sink_clone.send(entry).await.expect("log_stream is dropped");
- });
- res
- });
- builder.write_style(env_logger::WriteStyle::Always);
-
- for (module, level) in log_targets {
- builder.filter_module(module, level);
- }
- let _ = builder.is_test(true).try_init();
-}
-
/// Produces a default configuration object, suitable for use with most set ups.
pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box) -> Configuration {
let base_path = base_path();
@@ -150,3 +124,13 @@ pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box TaskExecutor {
+ let task_executor = move |fut, task_type| match task_type {
+ TaskType::Async => handle.spawn(fut).map(drop),
+ TaskType::Blocking => handle.spawn_blocking(move || futures::executor::block_on(fut)).map(drop),
+ };
+
+ task_executor.into()
+}