diff --git a/Cargo.lock b/Cargo.lock
index a3df3f338e..5799f8ef03 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1944,6 +1944,8 @@ dependencies = [
 name = "dc-db"
 version = "2.6.3"
 dependencies = [
+ "dp-storage",
+ "dvm-ethereum",
  "kvdb",
  "kvdb-rocksdb",
  "parity-scale-codec",
@@ -2278,9 +2280,11 @@ dependencies = [
  "darwinia-evm",
  "frame-support",
  "frame-system",
+ "pallet-timestamp",
  "parity-scale-codec",
  "sp-core",
  "sp-inherents",
+ "sp-io",
  "sp-runtime",
 ]
diff --git a/client/dvm/db/Cargo.toml b/client/dvm/db/Cargo.toml
index 3ed03d9e93..2aed3679ba 100644
--- a/client/dvm/db/Cargo.toml
+++ b/client/dvm/db/Cargo.toml
@@ -19,3 +19,6 @@ parking_lot = { version = "0.11.1" }
 sp-core = { git = "https://github.com/darwinia-network/substrate", branch = "main" }
 sp-database = { git = "https://github.com/darwinia-network/substrate", branch = "main" }
 sp-runtime = { git = "https://github.com/darwinia-network/substrate", branch = "main" }
+# darwinia-network
+dp-storage = { path = "../../../primitives/storage/" }
+dvm-ethereum = { path = "../../../frame/dvm" }
diff --git a/client/dvm/db/src/lib.rs b/client/dvm/db/src/lib.rs
index c4639defd3..5f9564964d 100644
--- a/client/dvm/db/src/lib.rs
+++ b/client/dvm/db/src/lib.rs
@@ -18,19 +18,23 @@ mod utils;
 
-// --- paritytech ---
 pub use sp_database::Database;
-// --- darwinia-network ---
-use codec::{Decode, Encode};
-use parking_lot::Mutex;
-use sp_core::H256;
-use sp_runtime::traits::Block as BlockT;
+
 // --- std ---
 use std::{
 	marker::PhantomData,
 	path::{Path, PathBuf},
 	sync::Arc,
 };
+// --- crates.io ---
+use codec::{Decode, Encode};
+use parking_lot::Mutex;
+// --- paritytech ---
+use sp_core::H256;
+use sp_runtime::traits::Block as BlockT;
+// --- darwinia-network ---
+use dp_storage::PALLET_ETHEREUM_SCHEMA_CACHE;
+use dvm_ethereum::EthereumStorageSchema;
 
 const DB_HASH_LEN: usize = 32;
 
 /// Hash type that this backend uses for the database.
@@ -140,6 +144,37 @@ impl<Block: BlockT> MetaDb<Block> {
 
 		Ok(())
 	}
+
+	pub fn ethereum_schema(&self) -> Result<Option<Vec<(EthereumStorageSchema, H256)>>, String> {
+		match self
+			.db
+			.get(crate::columns::META, &PALLET_ETHEREUM_SCHEMA_CACHE.encode())
+		{
+			Some(raw) => Ok(Some(
+				Decode::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?,
+			)),
+			None => Ok(None),
+		}
+	}
+
+	pub fn write_ethereum_schema(
+		&self,
+		new_cache: Vec<(EthereumStorageSchema, H256)>,
+	) -> Result<(), String> {
+		let mut transaction = sp_database::Transaction::new();
+
+		transaction.set(
+			crate::columns::META,
+			&PALLET_ETHEREUM_SCHEMA_CACHE.encode(),
+			&new_cache.encode(),
+		);
+
+		self.db
+			.commit(transaction)
+			.map_err(|e| format!("{:?}", e))?;
+
+		Ok(())
+	}
 }
 
 pub struct MappingCommitment<Block: BlockT> {
diff --git a/client/dvm/mapping-sync/src/lib.rs b/client/dvm/mapping-sync/src/lib.rs
index 410120a087..b99668bd7e 100644
--- a/client/dvm/mapping-sync/src/lib.rs
+++ b/client/dvm/mapping-sync/src/lib.rs
@@ -18,7 +18,7 @@ mod worker;
 
-pub use worker::MappingSyncWorker;
+pub use worker::{MappingSyncWorker, SyncStrategy};
 
 // --- darwinia-network ---
 use dp_consensus::FindLogError;
@@ -99,6 +99,7 @@ pub fn sync_one_block<Block: BlockT, C, B>(
 	client: &C,
 	substrate_backend: &B,
 	frontier_backend: &dc_db::Backend<Block>,
+	strategy: SyncStrategy,
 ) -> Result<bool, String>
 where
 	C: ProvideRuntimeApi<Block> + Send + Sync + HeaderBackend<Block> + BlockOf,
@@ -152,6 +153,11 @@ where
 			.write_current_syncing_tips(current_syncing_tips)?;
 		Ok(true)
 	} else {
+		if SyncStrategy::Parachain == strategy
+			&& operating_header.number() > &client.info().best_number
+		{
+			return Ok(false);
+		}
 		sync_block(frontier_backend, &operating_header)?;
 
 		current_syncing_tips.push(*operating_header.parent_hash());
@@ -167,6 +173,7 @@ pub fn sync_blocks<Block: BlockT, C, B>(
 	substrate_backend: &B,
 	frontier_backend: &dc_db::Backend<Block>,
 	limit: usize,
+	strategy: SyncStrategy,
 ) -> Result<bool, String>
 where
 	C: ProvideRuntimeApi<Block> + Send + Sync + HeaderBackend<Block> + BlockOf,
@@ -176,7 +183,8 @@ where
 	let mut synced_any = false;
 
 	for _ in 0..limit {
-		synced_any = synced_any || sync_one_block(client, substrate_backend, frontier_backend)?;
+		synced_any =
+			synced_any || sync_one_block(client, substrate_backend, frontier_backend, strategy)?;
 	}
 
 	Ok(synced_any)
diff --git a/client/dvm/mapping-sync/src/worker.rs b/client/dvm/mapping-sync/src/worker.rs
index 6dbb8b19e1..ce263a2636 100644
--- a/client/dvm/mapping-sync/src/worker.rs
+++ b/client/dvm/mapping-sync/src/worker.rs
@@ -34,6 +34,12 @@ use log::debug;
 
 const LIMIT: usize = 8;
 
+#[derive(PartialEq, Copy, Clone)]
+pub enum SyncStrategy {
+	Normal,
+	Parachain,
+}
+
 pub struct MappingSyncWorker<Block: BlockT, C, B> {
 	import_notifications: ImportNotifications<Block>,
 	timeout: Duration,
@@ -44,6 +50,7 @@ pub struct MappingSyncWorker<Block: BlockT, C, B> {
 	frontier_backend: Arc<dc_db::Backend<Block>>,
 
 	have_next: bool,
+	strategy: SyncStrategy,
 }
 
 impl<Block: BlockT, C, B> MappingSyncWorker<Block, C, B> {
@@ -53,6 +60,7 @@ impl<Block: BlockT, C, B> MappingSyncWorker<Block, C, B> {
 		client: Arc<C>,
 		substrate_backend: Arc<B>,
 		frontier_backend: Arc<dc_db::Backend<Block>>,
+		strategy: SyncStrategy,
 	) -> Self {
 		Self {
 			import_notifications,
@@ -64,6 +72,7 @@ impl<Block: BlockT, C, B> MappingSyncWorker<Block, C, B> {
 			frontier_backend,
 
 			have_next: true,
+			strategy,
 		}
 	}
 }
@@ -111,6 +120,7 @@ where
 				self.substrate_backend.blockchain(),
 				self.frontier_backend.as_ref(),
 				LIMIT,
+				self.strategy,
 			) {
 				Ok(have_next) => {
 					self.have_next = have_next;
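Note: `SyncStrategy` changes exactly one decision in `sync_one_block` — under `Parachain`, a block whose number is above the client's best is skipped for now, since a parachain node can have imported blocks beyond its current best. A sketch of how a service might pick the strategy when constructing the worker; the timeout value and the wrapper function are illustrative, only `MappingSyncWorker::new` and its argument order come from this diff:

```rust
use std::{sync::Arc, time::Duration};

use sc_client_api::ImportNotifications;
use sp_runtime::traits::Block as BlockT;

fn build_worker<Block: BlockT, C, B>(
	import_notifications: ImportNotifications<Block>,
	client: Arc<C>,
	substrate_backend: Arc<B>,
	frontier_backend: Arc<dc_db::Backend<Block>>,
	is_parachain: bool,
) -> MappingSyncWorker<Block, C, B> {
	// Parachain mode declines to map blocks past the best number, because a
	// parachain's best block can trail the blocks it has already imported.
	let strategy = if is_parachain {
		SyncStrategy::Parachain
	} else {
		SyncStrategy::Normal
	};
	MappingSyncWorker::new(
		import_notifications,
		Duration::new(6, 0), // polling interval; illustrative value
		client,
		substrate_backend,
		frontier_backend,
		strategy,
	)
}
```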
diff --git a/client/dvm/rpc/src/eth.rs b/client/dvm/rpc/src/eth.rs
index c91562bfd2..ed8d9eefc5 100644
--- a/client/dvm/rpc/src/eth.rs
+++ b/client/dvm/rpc/src/eth.rs
@@ -25,6 +25,8 @@ use dp_rpc::{
 	PendingTransactions, Receipt, Rich, RichBlock, SyncInfo, SyncStatus, Transaction,
 	TransactionRequest, Work,
 };
+use dp_storage::PALLET_ETHEREUM_SCHEMA;
+use dvm_ethereum::EthereumStorageSchema;
 use dvm_rpc_core::{
 	EthApi as EthApiT, EthFilterApi as EthFilterApiT, NetApi as NetApiT, Web3Api as Web3ApiT,
 };
@@ -41,9 +43,10 @@ use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
 use sp_runtime::traits::BlakeTwo256;
 use sp_runtime::traits::{Block as BlockT, NumberFor, One, Saturating, UniqueSaturatedInto, Zero};
 use sp_runtime::transaction_validity::TransactionSource;
+use sp_storage::{StorageData, StorageKey};
 use sp_transaction_pool::{InPoolTransaction, TransactionPool};
 // --- std ---
-use codec::{self, Encode};
+use codec::{self, Decode, Encode};
 use ethereum::{Block as EthereumBlock, Transaction as EthereumTransaction};
 use ethereum_types::{H160, H256, H512, H64, U256, U64};
 use futures::{future::TryFutureExt, StreamExt};
@@ -51,6 +54,7 @@ use jsonrpc_core::{
 	futures::future::{self, Future},
 	BoxFuture, ErrorCode, Result,
 };
+use log::warn;
 use sha3::{Digest, Keccak256};
 use std::collections::{BTreeMap, HashMap};
 use std::{
@@ -235,6 +239,7 @@ fn transaction_build(
 }
 
 fn filter_range_logs<B: BlockT, C>(
 	client: &C,
+	backend: &dc_db::Backend<B>,
 	overrides: &OverrideHandle<B>,
 	ret: &mut Vec<Log>,
 	max_past_logs: u32,
@@ -266,10 +271,48 @@ where
 	};
 	let bloom_filter = FilteredParams::bloom_filter(&filter.address, &topics_input);
 
+	// Get the schema cache: a single AuxStore read before iterating the block range,
+	// instead of an extra DB read per block to fetch the actual schema.
+	let mut local_cache: BTreeMap<NumberFor<B>, EthereumStorageSchema> = BTreeMap::new();
+	if let Ok(Some(schema_cache)) = frontier_backend_client::load_cached_schema::<B>(backend) {
+		for (schema, hash) in schema_cache {
+			if let Ok(Some(header)) = client.header(BlockId::Hash(hash)) {
+				let number = *header.number();
+				local_cache.insert(number, schema);
+			}
+		}
+	}
+	let cache_keys: Vec<NumberFor<B>> = local_cache.keys().cloned().collect();
+	let mut default_schema: Option<&EthereumStorageSchema> = None;
+	if cache_keys.len() == 1 {
+		// There is only one schema; use it unconditionally.
+		default_schema = local_cache.get(&cache_keys[0]);
+	}
+
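Note: the checkpoint lookup that the loop below performs with a linear scan can be stated compactly over a `BTreeMap`. An equivalent, self-contained sketch — `&str` stands in for `EthereumStorageSchema`, and the function name is illustrative:

```rust
use std::collections::BTreeMap;

/// Resolve the schema for `number` from checkpoint entries, where each key is
/// the block number at which a schema version became active.
fn schema_at<Schema: Copy>(cache: &BTreeMap<u64, Schema>, number: u64) -> Option<Schema> {
	// The last checkpoint at or below `number` is the schema in force.
	cache.range(..=number).next_back().map(|(_, s)| *s)
}

fn main() {
	let mut cache = BTreeMap::new();
	cache.insert(0u64, "V1"); // genesis runs schema V1
	cache.insert(100, "V2"); // V2 activated at block 100
	assert_eq!(schema_at(&cache, 99), Some("V1"));
	assert_eq!(schema_at(&cache, 100), Some("V2"));
}
```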
 	while current_number >= from {
 		let id = BlockId::Number(current_number);
-		let schema = frontier_backend_client::onchain_storage_schema::<B, C>(client, id);
+		let schema = match default_schema {
+			// If there is a single schema, just assign it.
+			Some(default_schema) => *default_schema,
+			_ => {
+				// With multiple schemas, iterate over the - hopefully short - list of
+				// keys and assign the schema belonging to current_number. The first
+				// checkpoint is the genesis block, so `current_number < k` can only
+				// hold for i >= 1 and (i - 1) never underflows.
+				let mut default_schema: Option<&EthereumStorageSchema> = None;
+				for (i, k) in cache_keys.iter().enumerate() {
+					if &current_number < k {
+						default_schema = local_cache.get(&cache_keys[i - 1]);
+					}
+				}
+				match default_schema {
+					Some(schema) => *schema,
+					// Fall back to a DB read. This happens e.g. when no cache task
+					// is configured at the service level.
+					_ => frontier_backend_client::onchain_storage_schema::<B, C>(client, id),
+				}
+			}
+		};
 		let handler = overrides
 			.schemas
 			.get(&schema)
 			.unwrap_or(&overrides.fallback);
@@ -908,7 +951,26 @@ where
 	}
 
 	fn estimate_gas(&self, request: CallRequest, _: Option<BlockNumber>) -> Result<U256> {
-		let calculate_gas_used = |request| -> Result<U256> {
+		let gas_limit = {
+			// Query the current block's gas limit.
+			let id = BlockId::Hash(self.client.info().best_hash);
+			let schema =
+				frontier_backend_client::onchain_storage_schema::<B, C>(&self.client, id);
+			let handler = self
+				.overrides
+				.schemas
+				.get(&schema)
+				.unwrap_or(&self.overrides.fallback);
+
+			let block = handler.current_block(&id);
+			if let Some(block) = block {
+				block.header.gas_limit
+			} else {
+				return Err(internal_err("block unavailable, cannot query gas limit"));
+			}
+		};
+
+		let calculate_gas_used = |request, gas_limit| -> Result<U256> {
 			let hash = self.client.info().best_hash;
 
 			let CallRequest {
@@ -920,25 +982,8 @@ where
 				data,
 				nonce,
 			} = request;
-
-			// use given gas limit or query current block's limit
-			let gas_limit = match gas {
-				Some(amount) => amount,
-				None => {
-					let block = self
-						.client
-						.runtime_api()
-						.current_block(&BlockId::Hash(hash))
-						.map_err(|err| internal_err(format!("runtime error: {:?}", err)))?;
-					if let Some(block) = block {
-						block.header.gas_limit
-					} else {
-						return Err(internal_err(format!(
-							"block unavailable, cannot query gas limit"
-						)));
-					}
-				}
-			};
+			// Use the request's gas limit only if it is below the block gas limit.
+			let gas_limit = core::cmp::min(gas.unwrap_or(gas_limit), gas_limit);
 
 			let data = data.map(|d| d.0).unwrap_or_default();
 			let used_gas = match to {
@@ -991,8 +1036,7 @@ where
 
 		if cfg!(feature = "rpc_binary_search_estimate") {
 			let mut lower = U256::from(21_000);
-			// TODO: get a good upper limit, but below U64::max to operation overflow
-			let mut upper = U256::from(1_000_000_000);
+			let mut upper = U256::from(gas_limit);
 			let mut mid = upper;
 			let mut best = mid;
 			let mut old_best: U256;
@@ -1007,7 +1051,7 @@ where
 			while change_pct > threshold_pct {
 				let mut test_request = request.clone();
 				test_request.gas = Some(mid);
-				match calculate_gas_used(test_request) {
+				match calculate_gas_used(test_request, gas_limit) {
 					// if Ok -- try to reduce the gas used
 					Ok(used_gas) => {
 						old_best = best;
@@ -1034,7 +1078,7 @@ where
 			}
 			Ok(best)
 		} else {
-			calculate_gas_used(request)
+			calculate_gas_used(request, gas_limit)
 		}
 	}
@@ -1308,6 +1352,7 @@ where
 			.unwrap_or(self.client.info().best_number);
 		let _ = filter_range_logs(
 			self.client.as_ref(),
+			self.backend.as_ref(),
 			&self.overrides,
 			&mut ret,
 			self.max_past_logs,
@@ -1442,6 +1487,7 @@ where
 
 pub struct EthFilterApi<B: BlockT, C> {
 	client: Arc<C>,
+	backend: Arc<dc_db::Backend<B>>,
 	filter_pool: FilterPool,
 	max_stored_filters: usize,
 	overrides: Arc<OverrideHandle<B>>,
@@ -1460,6 +1506,7 @@ where
 {
 	pub fn new(
 		client: Arc<C>,
+		backend: Arc<dc_db::Backend<B>>,
 		filter_pool: FilterPool,
 		max_stored_filters: usize,
 		overrides: Arc<OverrideHandle<B>>,
@@ -1467,6 +1514,7 @@ where
 	) -> Self {
 		Self {
 			client: client.clone(),
+			backend: backend.clone(),
 			filter_pool,
 			max_stored_filters,
 			overrides,
@@ -1618,6 +1666,7 @@ where
 				let mut ret: Vec<Log> = Vec::new();
 				let _ = filter_range_logs(
 					self.client.as_ref(),
+					self.backend.as_ref(),
 					&self.overrides,
 					&mut ret,
 					self.max_past_logs,
@@ -1683,6 +1732,7 @@ where
 		let mut ret: Vec<Log> = Vec::new();
 		let _ = filter_range_logs(
 			self.client.as_ref(),
+			self.backend.as_ref(),
 			&self.overrides,
 			&mut ret,
 			self.max_past_logs,
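Note: with the upper bound now capped at the block gas limit, the search in `estimate_gas` (which runs on `U256` with a percentage-change stop condition) reduces to a capped binary search. A self-contained sketch with `u64` and an abstract execution oracle in place of the EVM call — not the crate's exact loop:

```rust
/// `call(gas)` models executing the request with that gas allowance:
/// Ok(used) if it completed, Err(()) if it ran out of gas.
fn estimate_gas(block_gas_limit: u64, call: impl Fn(u64) -> Result<u64, ()>) -> Option<u64> {
	let mut lower = 21_000u64; // intrinsic cost of a plain transfer
	let mut upper = block_gas_limit; // never probe above what a block can hold
	let mut best = None;

	while lower <= upper {
		let mid = lower + (upper - lower) / 2;
		match call(mid) {
			// Succeeded: `mid` is sufficient; try to shrink the allowance.
			Ok(_) => {
				best = Some(mid);
				upper = mid - 1;
			}
			// Out of gas: a larger allowance is needed.
			Err(()) => lower = mid + 1,
		}
	}
	best
}

fn main() {
	// Toy oracle: the call needs exactly 53_000 gas.
	let oracle = |gas: u64| if gas >= 53_000 { Ok(53_000) } else { Err(()) };
	assert_eq!(estimate_gas(12_000_000, oracle), Some(53_000));
}
```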
@@ -1727,9 +1777,84 @@ pub struct EthTask<B, C>(PhantomData<(B, C)>);
 
 impl<B, C> EthTask<B, C>
 where
-	C: ProvideRuntimeApi<B> + BlockchainEvents<B>,
-	B: BlockT,
+	C: ProvideRuntimeApi<B> + BlockchainEvents<B> + HeaderBackend<B>,
+	B: BlockT,
 {
+	/// Task that caches at which best hash a new EthereumStorageSchema was inserted in the runtime storage.
+	pub async fn ethereum_schema_cache_task(client: Arc<C>, backend: Arc<dc_db::Backend<B>>) {
+		if let Ok(None) = frontier_backend_client::load_cached_schema::<B>(backend.as_ref()) {
+			let mut cache: Vec<(EthereumStorageSchema, H256)> = Vec::new();
+			if let Ok(Some(header)) = client.header(BlockId::Number(Zero::zero())) {
+				cache.push((EthereumStorageSchema::V1, header.hash()));
+				let _ = frontier_backend_client::write_cached_schema::<B>(backend.as_ref(), cache)
+					.map_err(|err| {
+						warn!("Error schema cache insert for genesis: {:?}", err);
+					});
+			} else {
+				warn!("Error genesis header unreachable");
+			}
+		}
+
+		// Subscribe to changes of the dvm-ethereum schema key.
+		if let Ok(mut stream) = client.storage_changes_notification_stream(
+			Some(&[StorageKey(PALLET_ETHEREUM_SCHEMA.to_vec())]),
+			None,
+		) {
+			while let Some((hash, changes)) = stream.next().await {
+				// Make sure only block hashes marked as best are referencing cache checkpoints.
+				if hash == client.info().best_hash {
+					// Just map the change set to the actual data.
+					let storage: Vec<Option<StorageData>> = changes
+						.iter()
+						.filter_map(|(o_sk, _k, v)| {
+							if o_sk.is_none() {
+								Some(v.cloned())
+							} else {
+								None
+							}
+						})
+						.collect();
+					for change in storage {
+						if let Some(data) = change {
+							// Decode the wrapped blob, whose type is known.
+							let new_schema: EthereumStorageSchema =
+								Decode::decode(&mut &data.0[..]).unwrap();
+							// Cache the new entry and overwrite the AuxStore value.
+							if let Ok(Some(old_cache)) =
+								frontier_backend_client::load_cached_schema::<B>(backend.as_ref())
+							{
+								let mut new_cache: Vec<(EthereumStorageSchema, H256)> = old_cache;
+								match &new_cache[..] {
+									[.., (schema, _)] if *schema == new_schema => {
+										warn!(
+											"Schema version already in AuxStore, ignoring: {:?}",
+											new_schema
+										);
+									}
+									_ => {
+										new_cache.push((new_schema, hash));
+										let _ = frontier_backend_client::write_cached_schema::<B>(
+											backend.as_ref(),
+											new_cache,
+										)
+										.map_err(|err| {
+											warn!("Error schema cache insert: {:?}", err);
+										});
+									}
+								}
+							} else {
+								warn!("Error schema cache is corrupted");
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
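Note: the subscription in `ethereum_schema_cache_task` is the standard `BlockchainEvents` pattern — request a stream filtered to one storage key, then react once per imported block. Reduced to its skeleton below; bounds and crate paths follow the Substrate branch this tree builds against, and the function itself is illustrative:

```rust
use std::sync::Arc;

use futures::StreamExt;
use sc_client_api::BlockchainEvents;
use sp_runtime::traits::Block as BlockT;
use sp_storage::StorageKey;

async fn watch_schema_key<B: BlockT, C: BlockchainEvents<B>>(client: Arc<C>) {
	// Only deliver change sets that touch the `:ethereum_schema` key.
	let key = StorageKey(b":ethereum_schema".to_vec());
	if let Ok(mut stream) = client.storage_changes_notification_stream(Some(&[key]), None) {
		while let Some((block_hash, changes)) = stream.next().await {
			// Each item is (child trie key, key, new value); a `None` child trie
			// key means the change happened in top-level storage.
			for (child_key, _key, value) in changes.iter() {
				if child_key.is_none() {
					log::info!("schema key changed at {:?}: {:?}", block_hash, value);
				}
			}
		}
	}
}
```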
 	pub async fn pending_transaction_task(
 		client: Arc<C>,
 		pending_transactions: Arc<Mutex<HashMap<H256, PendingTransaction>>>,
@@ -1738,28 +1863,30 @@ where
 		let mut notification_st = client.import_notification_stream();
 
 		while let Some(notification) = notification_st.next().await {
-			if let Ok(mut pending_transactions) = pending_transactions.lock() {
-				// As pending transactions have a finite lifespan anyway
-				// we can ignore MultiplePostRuntimeLogs error checks.
-				let log = dp_consensus::find_log(&notification.header.digest()).ok();
-				let post_hashes = log.map(|log| log.into_hashes());
-
-				if let Some(post_hashes) = post_hashes {
-					// Retain all pending transactions that were not
-					// processed in the current block.
-					pending_transactions
-						.retain(|&k, _| !post_hashes.transaction_hashes.contains(&k));
-				}
+			if notification.is_new_best {
+				if let Ok(mut pending_transactions) = pending_transactions.lock() {
+					// As pending transactions have a finite lifespan anyway
+					// we can ignore MultiplePostRuntimeLogs error checks.
+					let log = dp_consensus::find_log(&notification.header.digest()).ok();
+					let post_hashes = log.map(|log| log.into_hashes());
+
+					if let Some(post_hashes) = post_hashes {
+						// Retain all pending transactions that were not
+						// processed in the current block.
+						pending_transactions
+							.retain(|&k, _| !post_hashes.transaction_hashes.contains(&k));
+					}
 
-				let imported_number: u64 = UniqueSaturatedInto::<u64>::unique_saturated_into(
-					*notification.header.number(),
-				);
+					let imported_number: u64 = UniqueSaturatedInto::<u64>::unique_saturated_into(
+						*notification.header.number(),
+					);
 
-				pending_transactions.retain(|_, v| {
-					// Drop all the transactions that exceeded the given lifespan.
-					let lifespan_limit = v.at_block + retain_threshold;
-					lifespan_limit > imported_number
-				});
+					pending_transactions.retain(|_, v| {
+						// Drop all the transactions that exceeded the given lifespan.
+						let lifespan_limit = v.at_block + retain_threshold;
+						lifespan_limit > imported_number
+					});
+				}
 			}
 		}
 	}
diff --git a/client/dvm/rpc/src/lib.rs b/client/dvm/rpc/src/lib.rs
index d2009807bc..7b10daf1a8 100644
--- a/client/dvm/rpc/src/lib.rs
+++ b/client/dvm/rpc/src/lib.rs
@@ -88,6 +88,35 @@ pub mod frontier_backend_client {
 		Ok(None)
 	}
 
+	pub fn load_cached_schema<B>(
+		backend: &dc_db::Backend<B>,
+	) -> RpcResult<Option<Vec<(EthereumStorageSchema, H256)>>>
+	where
+		B: BlockT + Send + Sync + 'static,
+	{
+		let cache = backend
+			.meta()
+			.ethereum_schema()
+			.map_err(|err| internal_err(format!("fetch backend failed: {:?}", err)))?;
+		Ok(cache)
+	}
+
+	pub fn write_cached_schema<B>(
+		backend: &dc_db::Backend<B>,
+		new_cache: Vec<(EthereumStorageSchema, H256)>,
+	) -> RpcResult<()>
+	where
+		B: BlockT + Send + Sync + 'static,
+	{
+		backend
+			.meta()
+			.write_ethereum_schema(new_cache)
+			.map_err(|err| internal_err(format!("write backend failed: {:?}", err)))?;
+		Ok(())
+	}
+
 	pub fn onchain_storage_schema<B: BlockT, C>(
 		client: &C,
 		at: BlockId<B>,
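Note: together with the `MetaDb` accessors in `dc-db`, these two helpers give the RPC layer a read-modify-write cycle over the cached schema list. A minimal sketch of that cycle, assuming a `dc_db::Backend<B>` handle and the module's `RpcResult` alias; `checkpoint_schema` itself is illustrative:

```rust
use dvm_ethereum::EthereumStorageSchema;
use sp_core::H256;
use sp_runtime::traits::Block as BlockT;

// Illustrative helper: append one (schema, block hash) checkpoint.
fn checkpoint_schema<B: BlockT + Send + Sync + 'static>(
	backend: &dc_db::Backend<B>,
	schema: EthereumStorageSchema,
	at: H256,
) -> RpcResult<()> {
	// Ok(None) simply means the cache has never been written yet.
	let mut cache =
		frontier_backend_client::load_cached_schema::<B>(backend)?.unwrap_or_default();
	// New checkpoints are appended, so the list stays ordered by activation block.
	cache.push((schema, at));
	// Persist the whole list back to the AuxStore in one write.
	frontier_backend_client::write_cached_schema::<B>(backend, cache)
}
```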
diff --git a/frame/dvm-dynamic-fee/Cargo.toml b/frame/dvm-dynamic-fee/Cargo.toml
index b33ceffdf6..f0bd673c1c 100644
--- a/frame/dvm-dynamic-fee/Cargo.toml
+++ b/frame/dvm-dynamic-fee/Cargo.toml
@@ -23,6 +23,10 @@ sp-core = { default-features = false, git = "https://github.com/darwinia-network/substrate", branch = "main" }
 sp-inherents = { default-features = false, git = "https://github.com/darwinia-network/substrate", branch = "main" }
 sp-runtime = { default-features = false, git = "https://github.com/darwinia-network/substrate", branch = "main" }
 
+[dev-dependencies]
+pallet-timestamp = { default-features = false, git = "https://github.com/darwinia-network/substrate", branch = "main" }
+sp-io = { default-features = false, git = "https://github.com/darwinia-network/substrate", branch = "main" }
+
 [features]
 default = ["std"]
diff --git a/frame/dvm-dynamic-fee/src/lib.rs b/frame/dvm-dynamic-fee/src/lib.rs
index 312bd9aaa9..1a8c87522e 100644
--- a/frame/dvm-dynamic-fee/src/lib.rs
+++ b/frame/dvm-dynamic-fee/src/lib.rs
@@ -68,7 +68,7 @@ pub mod pallet {
 		}
 
 		fn on_finalize(_: BlockNumberFor<T>) {
-			if let Some(target) = <TargetMinGasPrice<T>>::get() {
+			if let Some(target) = <TargetMinGasPrice<T>>::take() {
 				let bound =
 					<MinGasPrice<T>>::get() / T::MinGasPriceBoundDivisor::get() + U256::one();
 				let upper_limit = <MinGasPrice<T>>::get().saturating_add(bound);
@@ -81,12 +81,17 @@ pub mod pallet {
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
 		/// Set the target gas price.
-		#[pallet::weight(T::DbWeight::get().writes(1))]
-		fn note_min_gas_price_target(
+		#[pallet::weight((T::DbWeight::get().writes(1), DispatchClass::Mandatory))]
+		pub fn note_min_gas_price_target(
 			origin: OriginFor<T>,
 			target: U256,
 		) -> DispatchResultWithPostInfo {
 			ensure_none(origin)?;
+			// If a block author crafts a block that notes the target more than once,
+			// other validators will reject that block when import verification fails.
+			assert!(
+				<TargetMinGasPrice<T>>::get().is_none(),
+				"TargetMinGasPrice must be updated only once in the block"
+			);
 
 			<TargetMinGasPrice<T>>::set(Some(target));
@@ -160,3 +165,116 @@ pub mod pallet {
 	}
 }
 pub use pallet::*;
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate as dvm_dynamic_fee;
+
+	use frame_support::{
+		assert_ok, parameter_types,
+		traits::{OnFinalize, OnInitialize},
+	};
+	use sp_core::{H256, U256};
+	use sp_io::TestExternalities;
+	use sp_runtime::{
+		testing::Header,
+		traits::{BlakeTwo256, IdentityLookup},
+	};
+
+	pub fn new_test_ext() -> TestExternalities {
+		let t = frame_system::GenesisConfig::default()
+			.build_storage::<Test>()
+			.unwrap();
+		TestExternalities::new(t)
+	}
+
+	type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+	type Block = frame_system::mocking::MockBlock<Test>;
+
+	parameter_types! {
+		pub const BlockHashCount: u64 = 250;
+		pub BlockWeights: frame_system::limits::BlockWeights =
+			frame_system::limits::BlockWeights::simple_max(1024);
+	}
+	impl frame_system::Config for Test {
+		type BaseCallFilter = ();
+		type BlockWeights = ();
+		type BlockLength = ();
+		type DbWeight = ();
+		type Origin = Origin;
+		type Index = u64;
+		type BlockNumber = u64;
+		type Call = Call;
+		type Hash = H256;
+		type Hashing = BlakeTwo256;
+		type AccountId = u64;
+		type Lookup = IdentityLookup<Self::AccountId>;
+		type Header = Header;
+		type Event = Event;
+		type BlockHashCount = BlockHashCount;
+		type Version = ();
+		type PalletInfo = PalletInfo;
+		type AccountData = ();
+		type OnNewAccount = ();
+		type OnKilledAccount = ();
+		type SystemWeightInfo = ();
+		type SS58Prefix = ();
+		type OnSetCode = ();
+	}
+
+	frame_support::parameter_types! {
+		pub const MinimumPeriod: u64 = 1000;
+	}
+	impl pallet_timestamp::Config for Test {
+		type Moment = u64;
+		type OnTimestampSet = ();
+		type MinimumPeriod = MinimumPeriod;
+		type WeightInfo = ();
+	}
+
+	frame_support::parameter_types! {
+		pub BoundDivision: U256 = 1024.into();
+	}
+	impl Config for Test {
+		type MinGasPriceBoundDivisor = BoundDivision;
+	}
+
+	frame_support::construct_runtime!(
+		pub enum Test where
+			Block = Block,
+			NodeBlock = Block,
+			UncheckedExtrinsic = UncheckedExtrinsic,
+		{
+			System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+			Timestamp: pallet_timestamp::{Pallet, Call, Storage},
+			DynamicFee: dvm_dynamic_fee::{Pallet, Call, Storage, Inherent},
+		}
+	);
+
+	fn run_to_block(n: u64) {
+		while System::block_number() < n {
+			DynamicFee::on_finalize(System::block_number());
+			System::set_block_number(System::block_number() + 1);
+			DynamicFee::on_initialize(System::block_number());
+		}
+	}
+
+	#[test]
+	#[should_panic(expected = "TargetMinGasPrice must be updated only once in the block")]
+	fn double_set_in_a_block_failed() {
+		new_test_ext().execute_with(|| {
+			run_to_block(3);
+			assert_ok!(DynamicFee::note_min_gas_price_target(
+				Origin::none(),
+				U256::zero()
+			));
+			let _ = DynamicFee::note_min_gas_price_target(Origin::none(), U256::zero());
+			run_to_block(4);
+			assert_ok!(DynamicFee::note_min_gas_price_target(
+				Origin::none(),
+				U256::zero()
+			));
+		});
+	}
+}
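Note: the pairing of the `assert!` in `note_min_gas_price_target` with `take()` in `on_finalize` is exactly what the test above exercises — taking the value clears the slot at the end of each block, so the at-most-once check re-arms for the next block. A toy model of that life cycle, independent of FRAME:

```rust
// Toy stand-in for the TargetMinGasPrice storage value.
struct Target(Option<u64>);

impl Target {
	fn note(&mut self, v: u64) {
		// Mirrors the pallet's assert: reject a second note within one block.
		assert!(
			self.0.is_none(),
			"TargetMinGasPrice must be updated only once in the block"
		);
		self.0 = Some(v);
	}

	fn on_finalize(&mut self) -> Option<u64> {
		// `take` both returns and clears, re-arming the assert for the next block.
		self.0.take()
	}
}

fn main() {
	let mut target = Target(None);
	target.note(7);
	assert_eq!(target.on_finalize(), Some(7));
	target.note(9); // next block: allowed again because the slot was cleared
}
```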
diff --git a/frame/dvm/src/lib.rs b/frame/dvm/src/lib.rs
index 129f79abf5..280b8b89d7 100644
--- a/frame/dvm/src/lib.rs
+++ b/frame/dvm/src/lib.rs
@@ -518,13 +518,13 @@ impl<T: Config> Pallet<T> {
 		}
 
 		let ommers = Vec::<ethereum::Header>::new();
+		let receipts_root =
+			ethereum::util::ordered_trie_root(receipts.iter().map(|r| rlp::encode(r)));
 		let partial_header = ethereum::PartialHeader {
 			parent_hash: Self::current_block_hash().unwrap_or_default(),
 			beneficiary: darwinia_evm::Pallet::<T>::find_author(),
 			state_root: T::StateRoot::get(),
-			receipts_root: H256::from_slice(
-				Keccak256::digest(&rlp::encode_list(&receipts)[..]).as_slice(),
-			), // TODO: check receipts hash.
+			receipts_root,
 			logs_bloom,
 			difficulty: U256::zero(),
 			number: block_number,
@@ -639,7 +639,7 @@ impl<T: Config> BlockHashMapping for EthereumBlockHashMapping<T> {
 }
 
 /// The schema version for Pallet Ethereum's storage
-#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, Debug, Encode, Decode, PartialEq, Eq, PartialOrd, Ord)]
 pub enum EthereumStorageSchema {
 	Undefined,
 	V1,
diff --git a/node/rpc/src/pangolin.rs b/node/rpc/src/pangolin.rs
index 40b9845c60..2564a9d909 100644
--- a/node/rpc/src/pangolin.rs
+++ b/node/rpc/src/pangolin.rs
@@ -198,13 +198,14 @@ where
 		network.clone(),
 		overrides.clone(),
 		pending_transactions.clone(),
-		backend,
+		backend.clone(),
 		is_authority,
 		max_past_logs,
 	)));
 
 	if let Some(filter_pool) = filter_pool {
 		io.extend_with(EthFilterApiServer::to_delegate(EthFilterApi::new(
 			client.clone(),
+			backend,
 			filter_pool.clone(),
 			500 as usize, // max stored filters
 			overrides.clone(),
diff --git a/node/runtime/pangoro/src/pallets/transaction_payment.rs b/node/runtime/pangoro/src/pallets/transaction_payment.rs
index 9f99f35b6e..33884df5a4 100644
--- a/node/runtime/pangoro/src/pallets/transaction_payment.rs
+++ b/node/runtime/pangoro/src/pallets/transaction_payment.rs
@@ -1,4 +1,4 @@
- // --- paritytech ---
+// --- paritytech ---
 use pallet_transaction_payment::{Config, CurrencyAdapter};
 // --- darwinia-network ---
 use crate::*;
diff --git a/node/service/src/service/pangolin.rs b/node/service/src/service/pangolin.rs
index 846e796dcc..f969713d6f 100644
--- a/node/service/src/service/pangolin.rs
+++ b/node/service/src/service/pangolin.rs
@@ -66,7 +66,7 @@ use crate::{
 };
 use common_primitives::{AccountId, Balance, Hash, Nonce, OpaqueBlock as Block, Power};
 use dc_db::{Backend, DatabaseSettings, DatabaseSettingsSrc};
-use dc_mapping_sync::MappingSyncWorker;
+use dc_mapping_sync::{MappingSyncWorker, SyncStrategy};
 use dc_rpc::EthTask;
 use dp_rpc::{FilterPool, PendingTransactions};
 use drml_rpc::{
@@ -570,6 +570,10 @@ where
 			),
 		);
 	}
+	task_manager.spawn_essential_handle().spawn(
+		"frontier-schema-cache-task",
+		EthTask::ethereum_schema_cache_task(Arc::clone(&client), Arc::clone(&dvm_backend)),
+	);
 
 	if is_archive {
 		task_manager.spawn_essential_handle().spawn(
@@ -580,6 +584,7 @@ where
 				client.clone(),
 				backend.clone(),
 				dvm_backend.clone(),
+				SyncStrategy::Normal,
 			)
 			.for_each(|()| futures::future::ready(())),
 		);
diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs
index d2382d0419..f77bca57d0 100644
--- a/primitives/storage/src/lib.rs
+++ b/primitives/storage/src/lib.rs
@@ -19,3 +19,5 @@
 /// Current version of pallet Ethereum's storage schema is stored under this key.
 pub const PALLET_ETHEREUM_SCHEMA: &'static [u8] = b":ethereum_schema";
+/// Cached version of pallet Ethereum's storage schema is stored under this key in the AuxStore.
+pub const PALLET_ETHEREUM_SCHEMA_CACHE: &'static [u8] = b":ethereum_schema_cache";
diff --git a/tests/dvm/tests/0_test-rpc-block.js b/tests/dvm/tests/0_test-rpc-block.js
index 0a9031d473..77f8ddbcc9 100644
--- a/tests/dvm/tests/0_test-rpc-block.js
+++ b/tests/dvm/tests/0_test-rpc-block.js
@@ -20,7 +20,7 @@ describe("Test Block RPC", function () {
 			"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 		miner: "0x0000000000000000000000000000000000000000",
 		number: 0,
-		receiptsRoot: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+		receiptsRoot: "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
 		size: 505,
 		timestamp: 0,
 		totalDifficulty: "0",
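Note: the expected genesis `receiptsRoot` changes because of the pallet fix above. Hashing the RLP of an empty list gives keccak256(0xc0) = 0x1dcc4de8…, the familiar empty-list constant, whereas the ordered trie root over zero receipts is the empty trie root keccak256(rlp("")) = keccak256(0x80) = 0x56e81f17…. A quick check of both constants, assuming the `tiny-keccak` and `hex` crates as dev-dependencies:

```rust
use tiny_keccak::{Hasher, Keccak};

fn keccak256(data: &[u8]) -> [u8; 32] {
	let mut hasher = Keccak::v256();
	let mut out = [0u8; 32];
	hasher.update(data);
	hasher.finalize(&mut out);
	out
}

fn main() {
	// RLP of the empty list is the single byte 0xc0; of the empty string, 0x80.
	assert_eq!(
		hex::encode(keccak256(&[0xc0])),
		"1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347" // old value
	);
	assert_eq!(
		hex::encode(keccak256(&[0x80])),
		"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" // new value
	);
}
```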