Polkadot SDK communication fix #93

Closed
wants to merge 10 commits
9,456 changes: 6,635 additions & 2,821 deletions Cargo.lock

Large diffs are not rendered by default.

229 changes: 114 additions & 115 deletions Cargo.toml

Large diffs are not rendered by default.

3 changes: 0 additions & 3 deletions node/Cargo.toml
@@ -61,7 +61,6 @@ sp-runtime = { workspace = true }
sp-timestamp = { workspace = true }
substrate-frame-rpc-system = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }
try-runtime-cli = { workspace = true }

# Polkadot
polkadot-cli = { workspace = true }
@@ -97,11 +96,9 @@ substrate-build-script-utils = { workspace = true }
[features]
default = ["fc-rpc/rpc-binary-search-estimate"]
runtime-benchmarks = [
"try-runtime-cli/try-runtime",
"neuroweb-runtime/runtime-benchmarks",
"polkadot-cli/runtime-benchmarks",
]
try-runtime = [
"try-runtime-cli/try-runtime",
"neuroweb-runtime/try-runtime",
]
4 changes: 0 additions & 4 deletions node/src/cli.rs
@@ -38,10 +38,6 @@ pub enum Subcommand {
/// Try some testing command against a specified runtime state.
#[cfg(feature = "try-runtime")]
TryRuntime(try_runtime_cli::TryRuntimeCmd),

/// Errors since the binary was not build with `--features try-runtime`.
#[cfg(not(feature = "try-runtime"))]
TryRuntime,
}

#[derive(Debug, clap::Parser)]
7 changes: 1 addition & 6 deletions node/src/command.rs
@@ -222,12 +222,7 @@ pub fn run() -> Result<()> {
#[allow(unreachable_patterns)]
_ => Err("Benchmarking sub-command unsupported".into()),
}
}
Some(Subcommand::TryRuntime) => Err("The `try-runtime` subcommand has been migrated to a \
standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer \
being maintained here and will be removed entirely some time after January 2024. \
Please remove this subcommand from your runtime and use the standalone CLI."
.into()),
},
None => {
let runner = cli.create_runner(&cli.run.normalize())?;
let collator_options = cli.run.collator_options();
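
With the deprecated arm dropped, the subcommand match no longer mentions try-runtime at all; migrations are exercised out-of-tree with the standalone CLI named in the removed error message. A condensed sketch of the dispatch shape that remains (`run_benchmark` and `start_node` are hypothetical stand-ins for the handler bodies, and the other subcommand arms are elided):

```rust
// Sketch only: the tail of the subcommand match in node/src/command.rs once the
// deprecated arm is gone. `run_benchmark` and `start_node` are hypothetical
// stand-ins for the handler bodies; the real match covers more subcommands.
match &cli.subcommand {
    Some(Subcommand::Benchmark(cmd)) => run_benchmark(&cli, cmd),
    // No `Some(Subcommand::TryRuntime)` arm any more: runtime migrations are
    // checked with the standalone try-runtime CLI
    // (https://github.com/paritytech/try-runtime-cli) against the runtime Wasm.
    None => start_node(&cli),
}
```
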
23 changes: 12 additions & 11 deletions node/src/rpc.rs
@@ -7,7 +7,7 @@

use std::sync::Arc;

use neuroweb_runtime::{opaque::Block, AccountId, Balance, Hash, Nonce};
use neuroweb_runtime::{opaque::Block, AccountId, Balance, Nonce};
use cumulus_primitives_parachain_inherent::ParachainInherentData;
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
use polkadot_primitives::PersistedValidationData;
@@ -16,7 +16,7 @@ use sc_client_api::{
};
pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor};
use fc_rpc::{
pending::ConsensusDataProvider, EthBlockDataCacheTask, OverrideHandle, EthFilter, EthFilterApiServer, EthPubSub,
pending::ConsensusDataProvider, EthBlockDataCacheTask, EthFilter, EthFilterApiServer, EthPubSub,
EthPubSubApiServer, Web3, Web3ApiServer,
};
use sp_consensus_aura::{sr25519::AuthorityId as AuraId, AuraApi};
@@ -28,7 +28,8 @@ use sc_transaction_pool_api::TransactionPool;
use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
use sc_network::NetworkService;
use sc_network::service::traits::NetworkService;
use fc_storage::StorageOverride;

/// A type representing all RPC extensions.
pub type RpcExtension = jsonrpsee::RpcModule<()>;
@@ -48,18 +49,18 @@ pub struct FullDeps<C, P, A: ChainApi> {
/// The Node authority flag
pub is_authority: bool,
/// Network service
pub network: Arc<NetworkService<Block, Hash>>,
/// Backend.
pub network: Arc<dyn NetworkService>,
/// Backend.
pub backend: Arc<dyn fc_api::Backend<Block>>,
/// EthFilterApi pool.
pub filter_pool: FilterPool,
/// Maximum fee history cache size.
pub fee_history_cache_limit: u64,
/// Fee history cache.
pub fee_history_cache: FeeHistoryCache,
/// Ethereum data access overrides.
pub overrides: Arc<OverrideHandle<Block>>,
/// Cache for Ethereum block data.
/// Ethereum data access storage override.
pub storage_override: Arc<dyn StorageOverride<Block>>,
/// Cache for Ethereum block data.
pub block_data_cache: Arc<EthBlockDataCacheTask<Block>>,
}

@@ -104,7 +105,7 @@ where

let mut module = RpcExtension::new(());
let FullDeps { client, pool, graph, deny_unsafe, network, backend, is_authority, filter_pool,
sync, fee_history_cache, fee_history_cache_limit, overrides, block_data_cache
sync, fee_history_cache, fee_history_cache_limit, storage_override, block_data_cache
} = deps;

module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
@@ -152,7 +153,7 @@ where
no_tx_converter,
sync.clone(),
signers,
overrides.clone(),
storage_override.clone(),
backend.clone(),
is_authority,
block_data_cache.clone(),
@@ -199,7 +200,7 @@ where
client.clone(),
sync,
subscription_task_executor,
overrides,
storage_override,
pubsub_notification_sinks,
)
.into_rpc(),
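
The RPC wiring now takes trait objects instead of concrete types: `Arc<NetworkService<Block, Hash>>` becomes `Arc<dyn NetworkService>` and `Arc<OverrideHandle<Block>>` becomes `Arc<dyn StorageOverride<Block>>`, which is why the runtime `Hash` import disappears. A minimal sketch of the retyped fields, assuming only the re-exports already used in this diff (`fc_storage::StorageOverride`, `sc_network::service::traits::NetworkService`); the real struct is `FullDeps` in node/src/rpc.rs with all of its other fields:

```rust
use std::sync::Arc;

use fc_storage::StorageOverride; // Frontier's storage-override trait (object safe)
use sc_network::service::traits::NetworkService; // dyn-safe network handle
use sp_runtime::traits::Block as BlockT;

/// Pared-down illustration of the new dependency shapes.
pub struct EthDeps<Block: BlockT> {
    /// Any network backend (libp2p or litep2p) behind a single trait object,
    /// so the runtime `Hash` type is no longer needed here.
    pub network: Arc<dyn NetworkService>,
    /// Ethereum data access; previously `Arc<OverrideHandle<Block>>`.
    pub storage_override: Arc<dyn StorageOverride<Block>>,
}
```

Because both fields are erased to trait objects, the concrete choices (which network backend, which override handler) stay local to node/src/service.rs, and the RPC module does not change when they do.
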
65 changes: 34 additions & 31 deletions node/src/service.rs
@@ -6,7 +6,7 @@ use std::{sync::Arc, time::Duration, collections::BTreeMap};
use cumulus_client_cli::CollatorOptions;
// Local Runtime Types
use neuroweb_runtime::{
opaque::Block, RuntimeApi,
opaque::Block, RuntimeApi, Hash
};

// Cumulus Imports
@@ -20,13 +20,15 @@ use cumulus_client_service::{
};
use cumulus_primitives_core::ParaId;
use cumulus_relay_chain_interface::RelayChainInterface;
use polkadot_service::CollatorPair;
// Changed when the Moonbeam polkadot-sdk fork was adopted for the communication fix
use cumulus_primitives_core::relay_chain::CollatorPair;
// use polkadot_service::CollatorPair;

// Substrate Imports
use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
use sc_consensus::ImportQueue;
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_network::NetworkBlock;
use sc_network::{NetworkBackend, NetworkBlock};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
@@ -36,6 +38,7 @@ use substrate_prometheus_endpoint::Registry;
use futures::StreamExt;
use sc_client_api::BlockchainEvents;
use fc_rpc_core::types::{FeeHistoryCache, FilterPool};
use fc_storage::StorageOverrideHandler;

type ParachainClient = TFullClient<
Block,
@@ -66,14 +69,14 @@ pub struct AdditionalConfig {
pub fn open_frontier_backend<C>(
client: Arc<C>,
config: &sc_service::Configuration,
) -> Result<Arc<fc_db::kv::Backend<Block>>, String>
) -> Result<Arc<fc_db::kv::Backend<Block, C>>, String>
where
C: sp_blockchain::HeaderBackend<Block>,
{
let config_dir = config.base_path.config_dir(config.chain_spec.id());
let path = config_dir.join("frontier").join("db");

Ok(Arc::new(fc_db::kv::Backend::<Block>::new(
Ok(Arc::new(fc_db::kv::Backend::<Block, C>::new(
client,
&fc_db::kv::DatabaseSettings {
source: fc_db::DatabaseSource::RocksDb {
@@ -105,7 +108,7 @@ pub fn new_partial(
ParachainBlockImport,
Option<Telemetry>,
Option<TelemetryWorkerHandle>,
Arc<fc_db::kv::Backend<Block>>,
Arc<fc_db::kv::Backend<Block, ParachainClient>>,
),
>,
sc_service::Error,
@@ -187,7 +190,7 @@ pub fn new_partial(
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl(
async fn start_node_impl<N>(
parachain_config: Configuration,
polkadot_config: Configuration,
collator_options: CollatorOptions,
@@ -197,7 +200,9 @@ async fn start_node_impl(
) -> sc_service::error::Result<(
TaskManager,
Arc<ParachainClient>,
)> {
)>
where N: NetworkBackend<Block, Hash>,
{

let parachain_config = prepare_node_config(parachain_config);

@@ -224,7 +229,7 @@ async fn start_node_impl(
let is_authority = parachain_config.role.is_authority();
let transaction_pool = params.transaction_pool.clone();
let import_queue_service = params.import_queue.service();
let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
let net_config = sc_network::config::FullNetworkConfiguration::<_, _, N>::new(&parachain_config.network);

let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
build_network(BuildNetworkParams {
@@ -242,7 +247,7 @@

let filter_pool: FilterPool = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
let fee_history_cache: FeeHistoryCache = Arc::new(std::sync::Mutex::new(BTreeMap::new()));
let overrides = fc_storage::overrides_handle(client.clone());
let storage_override = Arc::new(StorageOverrideHandler::new(client.clone()));

// Sinks for pubsub notifications.
// Everytime a new subscription is created, a new mpsc channel is added to the sink pool.
@@ -263,7 +268,7 @@
Duration::new(6, 0),
client.clone(),
backend.clone(),
overrides.clone(),
storage_override.clone(),
frontier_backend.clone(),
3,
0,
@@ -293,16 +298,16 @@
Some("frontier"),
fc_rpc::EthTask::fee_history_task(
client.clone(),
overrides.clone(),
fee_history_cache.clone(),
storage_override.clone(),
fee_history_cache.clone(),
FEE_HISTORY_LIMIT,
),
);

let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
task_manager.spawn_handle(),
overrides.clone(),
50,
storage_override.clone(),
50,
50,
prometheus_registry.clone(),
));
@@ -317,19 +322,19 @@

Box::new(move |deny_unsafe, subscription_task_executor| {
let deps = crate::rpc::FullDeps {
client: client.clone(),
pool: transaction_pool.clone(),
graph: transaction_pool.pool().clone(),
sync: sync.clone(),
deny_unsafe,
is_authority,
network: network.clone(),
backend: frontier_backend.clone(),
filter_pool: filter_pool.clone(),
fee_history_cache_limit: FEE_HISTORY_LIMIT,
fee_history_cache: fee_history_cache.clone(),
overrides: overrides.clone(),
block_data_cache: block_data_cache.clone(),
client: client.clone(),
pool: transaction_pool.clone(),
graph: transaction_pool.pool().clone(),
sync: sync.clone(),
deny_unsafe,
is_authority,
network: network.clone(),
backend: frontier_backend.clone(),
filter_pool: filter_pool.clone(),
fee_history_cache_limit: FEE_HISTORY_LIMIT,
fee_history_cache: fee_history_cache.clone(),
storage_override: storage_override.clone(),
block_data_cache: block_data_cache.clone(),
};

let pending_consensus_data_provider = Box::new(
@@ -485,7 +490,6 @@ fn start_aura_consensus(
collator_key: CollatorPair,
additional_config: AdditionalConfig,
) -> Result<(), sc_service::Error> {
let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;

let mut proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
@@ -526,7 +530,6 @@
collator_key,
para_id,
overseer_handle,
slot_duration,
relay_chain_slot_duration: Duration::from_secs(6),
proposer: cumulus_client_consensus_proposer::Proposer::new(proposer_factory),
collator_service,
@@ -554,5 +557,5 @@ pub async fn start_parachain_node(
TaskManager,
Arc<ParachainClient>,
)> {
start_node_impl(parachain_config, polkadot_config, collator_options, para_id, hwbench, additional_config.clone()).await
start_node_impl::<sc_network::NetworkWorker<_, _>>(parachain_config, polkadot_config, collator_options, para_id, hwbench, additional_config.clone()).await
}
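
`start_node_impl` is now generic over `N: NetworkBackend<Block, Hash>`, but `start_parachain_node` still pins it to the libp2p `NetworkWorker`. If this `sc_network` version exposes `NetworkBackendType` and `Litep2pNetworkBackend` the way upstream polkadot-sdk does (an assumption, not something this diff shows), the tail call could instead dispatch on the configured backend:

```rust
// Sketch only: configuration-driven backend selection for the tail of
// start_parachain_node. Assumes sc_network::config::NetworkBackendType and
// sc_network::Litep2pNetworkBackend exist in this dependency set, as they do
// in upstream polkadot-sdk; the change above always uses NetworkWorker.
use sc_network::config::NetworkBackendType; // goes with the file's other imports

match parachain_config.network.network_backend {
    // litep2p backend, when selected in the node's network configuration.
    NetworkBackendType::Litep2p => start_node_impl::<sc_network::Litep2pNetworkBackend>(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        hwbench,
        additional_config.clone(),
    )
    .await,
    // Everything else keeps the libp2p worker that the change above hard-codes.
    _ => start_node_impl::<sc_network::NetworkWorker<_, _>>(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        hwbench,
        additional_config.clone(),
    )
    .await,
}
```

Either way the generic is resolved once here; everything downstream (`FullNetworkConfiguration::<_, _, N>`, the `Arc<dyn NetworkService>` handed to the RPC deps) only sees the trait.
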