From 6001b59f9f64a133d55fc13a495acc76eb4b532f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Paulo=20Silva=20de=20Souza?= <77391175+joao-paulo-parity@users.noreply.github.com> Date: Mon, 20 Jun 2022 09:18:06 -0300 Subject: [PATCH 001/135] check-dependent-cumulus should only be executed for PRs (#11693) the script executed by check-dependent-cumulus only works for PRs, as shown in https://gitlab.parity.io/parity/mirrors/substrate/-/jobs/1630771#L87, which comes from https://github.com/paritytech/pipeline-scripts/blob/3ad10ddc0d985ef5326974a1143229c6429befab/check_dependent_project.sh#L443 --- scripts/ci/gitlab/pipeline/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index da6391fb7c278..25ecad5bc5264 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -42,6 +42,8 @@ check-dependent-cumulus: COMPANION_OVERRIDES: | substrate: polkadot-v* polkadot: release-v* + rules: + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ #PRs build-linux-substrate: stage: build From b66c051f118898ef78e68e0ccf78b1cd8f00090b Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 21 Jun 2022 00:52:43 +0900 Subject: [PATCH 002/135] Pump the gossip engine while waiting for the BEEFY runtime pallet (memory leak fix) (#11694) * Pump the gossip engine while waiting for the BEEFY runtime pallet This fixes a memory leak when the BEEFY gadget is turned on, but the runtime doesn't actually use BEEFY. * Implement `FusedFuture` for `GossipEngine` * Fuse futures outside of loops --- client/beefy/src/worker.rs | 77 +++++++++++++++-------------- client/network-gossip/src/bridge.rs | 15 +++++- 2 files changed, 55 insertions(+), 37 deletions(-) diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index ae466a71abb57..735dea0170a62 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -25,11 +25,10 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::{future, FutureExt, StreamExt}; +use futures::StreamExt; use log::{debug, error, info, log_enabled, trace, warn}; -use parking_lot::Mutex; -use sc_client_api::{Backend, FinalityNotification, FinalityNotifications}; +use sc_client_api::{Backend, FinalityNotification}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -80,7 +79,7 @@ pub(crate) struct BeefyWorker { runtime: Arc, key_store: BeefyKeystore, signed_commitment_sender: BeefySignedCommitmentSender, - gossip_engine: Arc>>, + gossip_engine: GossipEngine, gossip_validator: Arc>, /// Min delta in block numbers between two blocks, BEEFY should vote on min_block_delta: u32, @@ -88,7 +87,6 @@ pub(crate) struct BeefyWorker { rounds: Option>, /// Buffer holding votes for blocks that the client hasn't seen finality for. 
pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, - finality_notifications: FinalityNotifications, /// Best block we received a GRANDPA notification for best_grandpa_block_header: ::Header, /// Best block a BEEFY voting round has been concluded for @@ -143,14 +141,13 @@ where runtime, key_store, signed_commitment_sender, - gossip_engine: Arc::new(Mutex::new(gossip_engine)), + gossip_engine, gossip_validator, // always target at least one block better than current best beefy min_block_delta: min_block_delta.max(1), metrics, rounds: None, pending_votes: BTreeMap::new(), - finality_notifications: client.finality_notification_stream(), best_grandpa_block_header: last_finalized_header, best_beefy_block: None, last_signed_id: 0, @@ -471,15 +468,21 @@ where true, ); - self.gossip_engine.lock().gossip_message(topic::(), encoded_message, false); + self.gossip_engine.gossip_message(topic::(), encoded_message, false); } /// Wait for BEEFY runtime pallet to be available. async fn wait_for_runtime_pallet(&mut self) { - self.client - .finality_notification_stream() - .take_while(|notif| { - let at = BlockId::hash(notif.header.hash()); + let mut gossip_engine = &mut self.gossip_engine; + let mut finality_stream = self.client.finality_notification_stream().fuse(); + loop { + futures::select! { + notif = finality_stream.next() => { + let notif = match notif { + Some(notif) => notif, + None => break + }; + let at = BlockId::hash(notif.header.hash()); if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { if active.id() == GENESIS_AUTHORITY_SET_ID { // When starting from genesis, there is no session boundary digest. @@ -490,18 +493,18 @@ where // worker won't vote until it witnesses a session change. // Once we'll implement 'initial sync' (catch-up), the worker will be able to // start voting right away. - self.handle_finality_notification(notif); - future::ready(false) + self.handle_finality_notification(¬if); + break } else { trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); debug!(target: "beefy", "🥩 Waiting for BEEFY pallet to become available..."); - future::ready(true) } - }) - .for_each(|_| future::ready(())) - .await; - // get a new stream that provides _new_ notifications (from here on out) - self.finality_notifications = self.client.finality_notification_stream(); + }, + _ = gossip_engine => { + break + } + } + } } /// Main loop for BEEFY worker. 
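// A note on the `select!`-based rewrite above: `futures::select!` can only
// poll futures that implement `FusedFuture`, which is why the finality stream
// is `.fuse()`d here and why this patch implements `FusedFuture` for
// `GossipEngine` (see the `bridge.rs` hunk below). A minimal, self-contained
// sketch of the same pattern, with stand-in types instead of the real worker:
//
// use futures::{future, stream, FutureExt, StreamExt};
//
// async fn pump_until_done() {
//     // Stand-ins for the finality notification stream and the gossip engine.
//     let mut notifications = stream::iter([1u32, 2, 3]).fuse();
//     let mut engine = future::pending::<()>().fuse();
//
//     loop {
//         futures::select! {
//             notif = notifications.next() => match notif {
//                 Some(n) => println!("handling finality notification {}", n),
//                 None => break, // stream ended
//             },
//             // Polling the engine in this branch keeps it pumping (draining
//             // its queues) instead of leaking memory while we wait.
//             _ = engine => break, // engine terminated
//         }
//     }
// }
//
// fn main() {
//     futures::executor::block_on(pump_until_done());
// }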
@@ -512,16 +515,20 @@ where info!(target: "beefy", "🥩 run BEEFY worker, best grandpa: #{:?}.", self.best_grandpa_block_header.number()); self.wait_for_runtime_pallet().await; - let mut votes = Box::pin(self.gossip_engine.lock().messages_for(topic::()).filter_map( - |notification| async move { - trace!(target: "beefy", "🥩 Got vote message: {:?}", notification); - - VoteMessage::, AuthorityId, Signature>::decode( - &mut ¬ification.message[..], - ) - .ok() - }, - )); + let mut finality_notifications = self.client.finality_notification_stream().fuse(); + let mut votes = Box::pin( + self.gossip_engine + .messages_for(topic::()) + .filter_map(|notification| async move { + trace!(target: "beefy", "🥩 Got vote message: {:?}", notification); + + VoteMessage::, AuthorityId, Signature>::decode( + &mut ¬ification.message[..], + ) + .ok() + }) + .fuse(), + ); loop { while self.sync_oracle.is_major_syncing() { @@ -529,18 +536,16 @@ where futures_timer::Delay::new(Duration::from_secs(5)).await; } - let engine = self.gossip_engine.clone(); - let gossip_engine = future::poll_fn(|cx| engine.lock().poll_unpin(cx)); - + let mut gossip_engine = &mut self.gossip_engine; futures::select! { - notification = self.finality_notifications.next().fuse() => { + notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); } else { return; } }, - vote = votes.next().fuse() => { + vote = votes.next() => { if let Some(vote) = vote { let block_num = vote.commitment.block_number; if block_num > *self.best_grandpa_block_header.number() { @@ -563,7 +568,7 @@ where return; } }, - _ = gossip_engine.fuse() => { + _ = gossip_engine => { error!(target: "beefy", "🥩 Gossip engine has terminated."); return; } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 2e09e7cc614a4..2d086e89b4a10 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -53,6 +53,8 @@ pub struct GossipEngine { message_sinks: HashMap>>, /// Buffered messages (see [`ForwardingState`]). forwarding_state: ForwardingState, + + is_terminated: bool, } /// A gossip engine receives messages from the network via the `network_event_stream` and forwards @@ -94,6 +96,8 @@ impl GossipEngine { network_event_stream, message_sinks: HashMap::new(), forwarding_state: ForwardingState::Idle, + + is_terminated: false, } } @@ -214,7 +218,10 @@ impl Future for GossipEngine { Event::Dht(_) => {}, }, // The network event stream closed. Do the same for [`GossipValidator`]. 
- Poll::Ready(None) => return Poll::Ready(()), + Poll::Ready(None) => { + self.is_terminated = true; + return Poll::Ready(()) + }, Poll::Pending => break, } }, @@ -288,6 +295,12 @@ impl Future for GossipEngine { } } +impl futures::future::FusedFuture for GossipEngine { + fn is_terminated(&self) -> bool { + self.is_terminated + } +} + #[cfg(test)] mod tests { use super::*; From 0fbf917676824cda9c3cf7e71e83b20c26b5f92e Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Tue, 21 Jun 2022 08:49:43 +0200 Subject: [PATCH 003/135] Implement Serialize/Deserialize on WeakBoundedVec (#11713) * Implement Serialize/Deserialize on WeakBoundedVec * cargo fmt * Warn when there are too many elements while deserializing WeakBoundedVec --- .../runtime/src/bounded/weak_bounded_vec.rs | 70 ++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs index 08460f7096357..ed9f4bba62b55 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -25,6 +25,11 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; +#[cfg(feature = "std")] +use serde::{ + de::{Error, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; use sp_std::{marker::PhantomData, prelude::*}; /// A weakly bounded vector. @@ -34,9 +39,72 @@ use sp_std::{marker::PhantomData, prelude::*}; /// /// The length of the vec is not strictly bounded. Decoding a vec with more element that the bound /// is accepted, and some method allow to bypass the restriction with warnings. +#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] -pub struct WeakBoundedVec(pub(super) Vec, PhantomData); +pub struct WeakBoundedVec( + pub(super) Vec, + #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData, +); + +#[cfg(feature = "std")] +impl<'de, T, S: Get> Deserialize<'de> for WeakBoundedVec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor>(PhantomData<(T, S)>); + + impl<'de, T, S: Get> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + let mut values = Vec::with_capacity(size); + + while let Some(value) = seq.next_element()? { + values.push(value); + if values.len() > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + } + + Ok(values) + } + } + + let visitor: VecVisitor = VecVisitor(PhantomData); + deserializer.deserialize_seq(visitor).map(|v| { + WeakBoundedVec::::try_from(v).map_err(|_| Error::custom("out of bounds")) + })? 
+ } +} impl> Decode for WeakBoundedVec { fn decode(input: &mut I) -> Result { From aacc3e99c0750c3e6c851a8270dc2c4eebca3290 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 21 Jun 2022 11:50:51 +0200 Subject: [PATCH 004/135] More robust grandpa revert procedure (#11719) * More robust revert procedure Return an error if revert is called in a node that is not actively running grandpa, i.e. grandpa genesis data has not been initialized. Previous implementation was just firing an `unreachable!` code exception. Furthermore we skip revert hassle if there is nothing to revert. * Nit --- client/consensus/babe/src/lib.rs | 4 ++++ client/finality-grandpa/src/lib.rs | 12 ++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 84a3c618b3803..8478122375319 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1873,7 +1873,11 @@ where { let best_number = client.info().best_number; let finalized = client.info().finalized_number; + let revertible = blocks.min(best_number - finalized); + if revertible == Zero::zero() { + return Ok(()) + } let revert_up_to_number = best_number - revertible; let revert_up_to_hash = client.hash(revert_up_to_number)?.ok_or(ClientError::Backend( diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 17e04affa0f98..cb32957c0b0bf 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1170,11 +1170,15 @@ fn local_authority_id( pub fn revert(client: Arc, blocks: NumberFor) -> ClientResult<()> where Block: BlockT, - Client: AuxStore + HeaderMetadata + HeaderBackend, + Client: AuxStore + HeaderMetadata + HeaderBackend, { let best_number = client.info().best_number; let finalized = client.info().finalized_number; + let revertible = blocks.min(best_number - finalized); + if revertible == Zero::zero() { + return Ok(()) + } let number = best_number - revertible; let hash = client @@ -1185,8 +1189,12 @@ where )))?; let info = client.info(); + let persistent_data: PersistentData = - aux_schema::load_persistent(&*client, info.genesis_hash, Zero::zero(), || unreachable!())?; + aux_schema::load_persistent(&*client, info.genesis_hash, Zero::zero(), || { + const MSG: &str = "Unexpected missing grandpa data during revert"; + Err(ClientError::Application(Box::from(MSG))) + })?; let shared_authority_set = persistent_data.authority_set; let mut authority_set = shared_authority_set.inner(); From b49d8ee54f4bd6e4b5b48f45d08f75705fad95b4 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Wed, 22 Jun 2022 04:12:57 +0900 Subject: [PATCH 005/135] Fix typo in weights.rs (#11724) overriden -> overridden --- frame/support/src/weights.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index a3df199496922..24240c7cc4e6a 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -680,7 +680,7 @@ where /// Calculates the fee from the passed `weight` according to the `polynomial`. /// - /// This should not be overriden in most circumstances. Calculation is done in the + /// This should not be overridden in most circumstances. Calculation is done in the /// `Balance` type and never overflows. All evaluation is saturating. 
fn weight_to_fee(weight: &Weight) -> Self::Balance { Self::polynomial() From 90d08f77aaaebe8f7012216948e54b6684a175b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 22 Jun 2022 11:02:59 +0200 Subject: [PATCH 006/135] WrapperOpaque: Use `decode_all` to decode from the `Vec` (#11726) This ensures that there isn't any extra data attached that isn't used. --- frame/support/src/traits/misc.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 1f0ba1e769c24..ccbb47909d5f4 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -745,7 +745,7 @@ impl Encode for WrapperOpaque { impl Decode for WrapperOpaque { fn decode(input: &mut I) -> Result { - Ok(Self(T::decode(&mut &>::decode(input)?[..])?)) + Ok(Self(T::decode_all(&mut &>::decode(input)?[..])?)) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -967,6 +967,10 @@ mod test { 2usize.pow(14) - 1 + 2 ); assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); + + let data = 4u64; + // Ensure that we check that the `Vec` is consumed completly on decode. + assert!(WrapperOpaque::::decode(&mut &data.encode().encode()[..]).is_err()); } #[test] From 85cfb817d8392d8acb791d80fdef3f8e298f6c03 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 22 Jun 2022 17:27:46 +0200 Subject: [PATCH 007/135] Respect cargo offline env variable in wasm builder (#11735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Support offline env variable in wasm builder * Clean up * Improve checks Co-authored-by: Bastian Köcher * Update crate docs * Add docs to `lib.rs` and introduce helper method `offline_build` Co-authored-by: Bastian Köcher --- utils/wasm-builder/README.md | 1 + utils/wasm-builder/src/lib.rs | 5 ++++ utils/wasm-builder/src/wasm_project.rs | 32 ++++++++++++++++++++------ 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index 3868faf1acab5..547468c7ca11d 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -64,6 +64,7 @@ By using environment variables, you can configure which Wasm binaries are built to be absolute. - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. +- `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to prevent network access. Useful in offline environments. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 6a7f0d7ca3cd5..919290655368b 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -87,6 +87,8 @@ //! required as we walk up from the target directory until we find a `Cargo.toml`. If the target //! directory is changed for the build, this environment variable can be used to point to the //! actual workspace. +//! - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to +//! prevent network access. Useful in offline environments. //! //! Each project can be skipped individually by using the environment variable //! `SKIP_PROJECT_NAME_WASM_BUILD`. 
Where `PROJECT_NAME` needs to be replaced by the name of the @@ -119,6 +121,9 @@ pub use builder::{WasmBuilder, WasmBuilderSelectProject}; /// Environment variable that tells us to skip building the wasm binary. const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; +/// Environment variable that tells us whether we should avoid network requests +const OFFLINE: &str = "CARGO_NET_OFFLINE"; + /// Environment variable to force a certain build type when building the wasm binary. /// Expects "debug", "release" or "production" as value. /// diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index be84f2fecfb53..197c1d1b220bb 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{write_file_if_changed, CargoCommandVersioned}; +use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; @@ -88,12 +88,12 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { cargo_manifest.to_path_buf() }; - let crate_metadata = MetadataCommand::new() - .manifest_path(cargo_manifest) - .features(CargoOpt::AllFeatures) + let mut crate_metadata_command = create_metadata_command(cargo_manifest); + crate_metadata_command.features(CargoOpt::AllFeatures); + + let crate_metadata = crate_metadata_command .exec() .expect("`cargo metadata` can not fail on project `Cargo.toml`; qed"); - // If the `Cargo.lock` didn't exist, we need to remove it after // calling `cargo metadata`. This is required to ensure that we don't change // the build directory outside of the `target` folder. Commands like @@ -593,6 +593,11 @@ impl Profile { } } +/// Check environment whether we should build without network +fn offline_build() -> bool { + env::var(OFFLINE).map_or(false, |v| v == "true") +} + /// Build the project to create the WASM binary. fn build_project( project: &Path, @@ -631,6 +636,10 @@ fn build_project( build_cmd.arg("--profile"); build_cmd.arg(profile.name()); + if offline_build() { + build_cmd.arg("--offline"); + } + println!("{}", colorize_info_message("Information that should be included in a bug report.")); println!("{} {:?}", colorize_info_message("Executing build command:"), build_cmd); println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version()); @@ -751,6 +760,16 @@ impl<'a> Deref for DeduplicatePackage<'a> { } } +fn create_metadata_command(path: impl Into) -> MetadataCommand { + let mut metadata_command = MetadataCommand::new(); + metadata_command.manifest_path(path); + + if offline_build() { + metadata_command.other_options(vec!["--offline".to_owned()]); + } + metadata_command +} + /// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is /// rebuilt when needed. 
fn generate_rerun_if_changed_instructions( @@ -765,8 +784,7 @@ fn generate_rerun_if_changed_instructions( rerun_if_changed(cargo_lock); } - let metadata = MetadataCommand::new() - .manifest_path(project_folder.join("Cargo.toml")) + let metadata = create_metadata_command(project_folder.join("Cargo.toml")) .exec() .expect("`cargo metadata` can not fail!"); From 0adc17e69fd1d66f2238adc2218b7728545fee53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 22 Jun 2022 17:54:57 +0200 Subject: [PATCH 008/135] contracts: Reduce size of deletion queue depth (#11696) * contracts: Reduce size of deletion queue depth * Remove unused import --- bin/node/runtime/src/lib.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index addcd0b35a9d9..54742cd91c430 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -47,7 +47,6 @@ use frame_system::{ }; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; -use pallet_contracts::weights::WeightInfo; use pallet_election_provider_multi_phase::SolutionAccuracyOf; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, @@ -1116,18 +1115,13 @@ parameter_types! { pub const DepositPerItem: Balance = deposit(1, 0); pub const DepositPerByte: Balance = deposit(0, 1); pub const MaxValueSize: u32 = 16 * 1024; + pub const DeletionQueueDepth: u32 = 128; // The lazy deletion runs inside on_initialize. pub DeletionWeightLimit: Weight = RuntimeBlockWeights::get() .per_class .get(DispatchClass::Normal) .max_total .unwrap_or(RuntimeBlockWeights::get().max_block); - // The weight needed for decoding the queue should be less or equal than a fifth - // of the overall weight dedicated to the lazy deletion. 
- pub DeletionQueueDepth: u32 = ((DeletionWeightLimit::get() / ( - ::WeightInfo::on_initialize_per_queue_item(1) - - ::WeightInfo::on_initialize_per_queue_item(0) - )) / 5) as u32; pub Schedule: pallet_contracts::Schedule = Default::default(); } From b0f294b7976ddd5129e1245f8ddb2f1bc4fe9be2 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 22 Jun 2022 19:00:00 +0300 Subject: [PATCH 009/135] Put `rusty-cachier` before PR merge into `master` for `cargo-check-benches` job (#11737) --- scripts/ci/gitlab/pipeline/test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index b2348c48355d0..0c3fd4d33798d 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -80,6 +80,8 @@ cargo-check-benches: - .test-refs - .collect-artifacts before_script: + - !reference [.rust-info-script, script] + - !reference [.rusty-cachier, before_script] # merges in the master branch on PRs - if [ $CI_COMMIT_REF_NAME != "master" ]; then git fetch origin +master:master; @@ -88,8 +90,6 @@ cargo-check-benches: git config user.email "ci@gitlab.parity.io"; git merge $CI_COMMIT_REF_NAME --verbose --no-edit; fi - - !reference [.rust-info-script, script] - - !reference [.rusty-cachier, before_script] script: - rusty-cachier snapshot create - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA From 656c62fd6cbacc393f61b6749e6699e4f02e9d5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 22 Jun 2022 17:03:03 +0100 Subject: [PATCH 010/135] epochs: don't use gap when there's at least one genesis epoch imported (#11725) * epochs: don't use gap when there's at least one genesis epoch imported * epochs: add test for genesis gap fix --- client/consensus/epochs/src/lib.rs | 115 ++++++++++++++++++++++++++++- 1 file changed, 112 insertions(+), 3 deletions(-) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 3a943e4851a4d..fee69613debf0 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -755,7 +755,10 @@ where Err(e) => PersistedEpoch::Regular(e), } } - } else if epoch.is_genesis() && !self.epochs.values().all(|e| e.is_genesis()) { + } else if epoch.is_genesis() && + !self.epochs.is_empty() && + !self.epochs.values().any(|e| e.is_genesis()) + { // There's a genesis epoch imported when we already have an active epoch. // This happens after the warp sync as the ancient blocks download start. // We need to start tracking gap epochs here. @@ -1170,15 +1173,17 @@ mod tests { epoch_changes.clear_gap(); // Check that both epochs are available. - epoch_changes + let epoch_a = epoch_changes .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 101, &make_genesis) .unwrap() .unwrap(); - epoch_changes + let epoch_x = epoch_changes .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 1001, &make_genesis) .unwrap() .unwrap(); + + assert!(epoch_a != epoch_x) } #[test] @@ -1288,4 +1293,108 @@ mod tests { epoch_changes.clear_gap(); assert!(epoch_changes.gap.is_none()); } + + /// Test that ensures that the gap is not enabled when there's still genesis + /// epochs imported, regardless of whether there are already other further + /// epochs imported descending from such genesis epochs. 
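+	///
+	/// (This mirrors the fix above: the gap only starts when a genesis epoch
+	/// arrives while the tree already contains epochs and none of them is a
+	/// genesis epoch.)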
+ #[test] + fn gap_is_not_enabled_when_at_least_one_genesis_epoch_is_still_imported() { + // A (#1) - B (#201) + // / + // 0 - C (#1) + // + // The epoch duration is 100 slots, each of these blocks represents + // an epoch change block. block B starts a new epoch at #201 since the + // genesis epoch spans two epochs. + + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, block) { + (b"0", _) => Ok(true), + (b"A", b"B") => Ok(true), + _ => Ok(false), + } + }; + + let duration = 100; + let make_genesis = |slot| Epoch { start_slot: slot, duration }; + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = (); + + // insert genesis epoch for A at slot 1 + { + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // insert regular epoch for B at slot 201, descending from A + { + let epoch_b_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"A", 1, 201) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&epoch_b_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"B", 201, *b"A", incremented_epoch) + .unwrap(); + } + + // insert genesis epoch for C at slot 1000 + { + let genesis_epoch_x_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1000) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"C", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // Clearing the gap should be a no-op. + epoch_changes.clear_gap(); + + // Check that all three epochs are available. + let epoch_a = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 10, &make_genesis) + .unwrap() + .unwrap(); + + let epoch_b = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"B", 201, 201, &make_genesis) + .unwrap() + .unwrap(); + + assert!(epoch_a != epoch_b); + + // the genesis epoch A will span slots [1, 200] with epoch B starting at slot 201 + assert_eq!(epoch_b.start_slot(), 201); + + let epoch_c = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"C", 1, 1001, &make_genesis) + .unwrap() + .unwrap(); + + assert!(epoch_a != epoch_c); + } } From 12b0503102480eb80fd2803b7475feabf8d6f6a2 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 22 Jun 2022 21:39:24 +0300 Subject: [PATCH 011/135] pallet-beefy-mmr: add API for BEEFY Authority Sets (#11406) * pallet-beefy: add Config::OnNewValidatorSet type Add a hook to pallet-beefy for doing specific work when BEEFY validator set changes. For example, this can be used by pallet-beefy-mmr to cache a lightweight MMR root over validators and make it available to light clients. * pallet-beefy-mmr: implement OnNewValidatorSet Implement pallet-beefy::OnNewValidatorSet to be notified of BEEFY validator set changes. Use the notifications to compute and cache a light weight 'BEEFY authority set' which is an MMR root over BEEFY validator set plus some extra info. 
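Concretely, the hook is wired up as in this patch's mock runtime:
pallet-beefy pushes validator-set changes into pallet-beefy-mmr, which
computes and caches the current/next authority set proofs.

    impl pallet_beefy::Config for Test {
        type BeefyId = BeefyId;
        type MaxAuthorities = ConstU32<100>;
        // Notify the BEEFY-MMR pallet on every validator set change so it can
        // recompute and cache `BeefyAuthoritySet` / `BeefyNextAuthoritySet`.
        type OnNewValidatorSet = BeefyMmr;
    }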
Previously, pallet-beefy-mmr was interrogating pallet-beefy about
validator set id on every block to find out when it needs to recompute
the authority set. By using the event-driven approach in this commit,
we also save one extra state interrogation per block.

* pallet-beefy-mmr: add new authority_set() API

Expose current and next BEEFY authority sets through runtime API.
These can be used directly by light clients, so they do not need to
compute the sets themselves from the BEEFY validator sets.

Signed-off-by: acatangiu

* rename BeefyMmr exposed runtime api
---
 Cargo.lock                            |  3 ++
 frame/beefy-mmr/primitives/Cargo.toml |  8 +++-
 frame/beefy-mmr/primitives/src/lib.rs | 17 +++++++
 frame/beefy-mmr/src/lib.rs            | 67 +++++++++++++++++++--------
 frame/beefy-mmr/src/mock.rs           |  1 +
 frame/beefy-mmr/src/tests.rs          | 50 ++++++++++++++++++++
 frame/beefy/src/lib.rs                | 51 ++++++++++++++++----
 frame/beefy/src/mock.rs               |  1 +
 primitives/beefy/src/lib.rs           | 14 ++++++
 primitives/beefy/src/mmr.rs           | 13 ++++--
 test-utils/runtime/Cargo.toml         |  2 +
 test-utils/runtime/src/lib.rs         | 10 ++++
 12 files changed, 202 insertions(+), 35 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d7b1350c1f529..cfcf5573c0bff 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -511,10 +511,12 @@ dependencies = [
 name = "beefy-merkle-tree"
 version = "4.0.0-dev"
 dependencies = [
+ "beefy-primitives",
 "env_logger 0.9.0",
 "hex",
 "hex-literal",
 "log",
+ "sp-api",
 "tiny-keccak",
]
@@ -10646,6 +10648,7 @@ dependencies = [
 name = "substrate-test-runtime"
 version = "2.0.0"
 dependencies = [
+ "beefy-merkle-tree",
 "beefy-primitives",
 "cfg-if 1.0.0",
 "frame-support",
diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml
index 7878dc3d22837..f30a418def042 100644
--- a/frame/beefy-mmr/primitives/Cargo.toml
+++ b/frame/beefy-mmr/primitives/Cargo.toml
@@ -13,6 +13,9 @@ hex = { version = "0.4", default-features = false, optional = true }
 log = { version = "0.4", default-features = false, optional = true }
 tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true }
 
+beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/beefy" }
+sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" }
+
 [dev-dependencies]
 env_logger = "0.9"
 hex = "0.4"
@@ -22,4 +25,7 @@ hex-literal = "0.3"
 debug = ["hex", "hex/std", "log"]
 default = ["debug", "keccak", "std"]
 keccak = ["tiny-keccak"]
-std = []
+std = [
+	"beefy-primitives/std",
+	"sp-api/std"
+]
diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs
index 04fa11760765b..664fd18199dd0 100644
--- a/frame/beefy-mmr/primitives/src/lib.rs
+++ b/frame/beefy-mmr/primitives/src/lib.rs
@@ -36,6 +36,8 @@ extern crate alloc;
 #[cfg(not(feature = "std"))]
 use alloc::vec::Vec;
 
+use beefy_primitives::mmr::{BeefyAuthoritySet, BeefyNextAuthoritySet};
+
 /// Supported hashing output size.
 ///
 /// The size is restricted to 32 bytes to allow for a more optimised implementation.
@@ -375,6 +377,21 @@ where
 	}
 }
 
+sp_api::decl_runtime_apis! {
+	/// API useful for BEEFY light clients.
+	pub trait BeefyMmrApi
+	where
+		H: From + Into,
+		BeefyAuthoritySet: sp_api::Decode,
+	{
+		/// Return the currently active BEEFY authority set proof.
+		fn authority_set_proof() -> BeefyAuthoritySet;
+
+		/// Return the next/queued BEEFY authority set proof.
+ fn next_authority_set_proof() -> BeefyNextAuthoritySet; + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 8b904d6aefd5f..456d6e77aa8eb 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -36,7 +36,10 @@ use sp_runtime::traits::{Convert, Hash, Member}; use sp_std::prelude::*; -use beefy_primitives::mmr::{BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}; +use beefy_primitives::{ + mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, + ValidatorSet as BeefyValidatorSet, +}; use pallet_mmr::{LeafDataProvider, ParentNumberAndHash}; use frame_support::{crypto::ecdsa::ECDSAExt, traits::Get}; @@ -124,6 +127,12 @@ pub mod pallet { type BeefyDataProvider: BeefyDataProvider; } + /// Details of current BEEFY authority set. + #[pallet::storage] + #[pallet::getter(fn beefy_authorities)] + pub type BeefyAuthorities = + StorageValue<_, BeefyAuthoritySet>, ValueQuery>; + /// Details of next BEEFY authority set. /// /// This storage entry is used as cache for calls to `update_beefy_next_authority_set`. @@ -149,7 +158,7 @@ where version: T::LeafVersion::get(), parent_number_and_hash: ParentNumberAndHash::::leaf_data(), leaf_extra: T::BeefyDataProvider::extra_data(), - beefy_next_authority_set: Pallet::::update_beefy_next_authority_set(), + beefy_next_authority_set: Pallet::::beefy_next_authorities(), } } } @@ -163,35 +172,55 @@ where } } +impl beefy_primitives::OnNewValidatorSet<::BeefyId> for Pallet +where + T: pallet::Config, + MerkleRootOf: From + Into, +{ + /// Compute and cache BEEFY authority sets based on updated BEEFY validator sets. + fn on_new_validator_set( + current_set: &BeefyValidatorSet<::BeefyId>, + next_set: &BeefyValidatorSet<::BeefyId>, + ) { + let current = Pallet::::compute_authority_set(current_set); + let next = Pallet::::compute_authority_set(next_set); + // cache the result + BeefyAuthorities::::put(¤t); + BeefyNextAuthorities::::put(&next); + } +} + impl Pallet where MerkleRootOf: From + Into, { - /// Returns details of the next BEEFY authority set. + /// Return the currently active BEEFY authority set proof. + pub fn authority_set_proof() -> BeefyAuthoritySet> { + Pallet::::beefy_authorities() + } + + /// Return the next/queued BEEFY authority set proof. + pub fn next_authority_set_proof() -> BeefyNextAuthoritySet> { + Pallet::::beefy_next_authorities() + } + + /// Returns details of a BEEFY authority set. /// /// Details contain authority set id, authority set length and a merkle root, /// constructed from uncompressed secp256k1 public keys converted to Ethereum addresses /// of the next BEEFY authority set. - /// - /// This function will use a storage-cached entry in case the set didn't change, or compute and - /// cache new one in case it did. - fn update_beefy_next_authority_set() -> BeefyNextAuthoritySet> { - let id = pallet_beefy::Pallet::::validator_set_id() + 1; - let current_next = Self::beefy_next_authorities(); - // avoid computing the merkle tree if validator set id didn't change. 
- if id == current_next.id { - return current_next - } - - let beefy_addresses = pallet_beefy::Pallet::::next_authorities() + fn compute_authority_set( + validator_set: &BeefyValidatorSet<::BeefyId>, + ) -> BeefyAuthoritySet> { + let id = validator_set.id(); + let beefy_addresses = validator_set + .validators() .into_iter() + .cloned() .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); - let next_set = BeefyNextAuthoritySet { id, len, root }; - // cache the result - BeefyNextAuthorities::::put(&next_set); - next_set + BeefyAuthoritySet { id, len, root } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index c9dbdb3b2e16a..8a673c9d4e914 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -125,6 +125,7 @@ impl pallet_mmr::Config for Test { impl pallet_beefy::Config for Test { type BeefyId = BeefyId; type MaxAuthorities = ConstU32<100>; + type OnNewValidatorSet = BeefyMmr; } parameter_types! { diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index fd3ecd5067155..d9cd8c8a5d8c8 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -149,3 +149,53 @@ fn should_contain_valid_leaf_data() { } ); } + +#[test] +fn should_update_authorities() { + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + let auth_set = BeefyMmr::authority_set_proof(); + let next_auth_set = BeefyMmr::next_authority_set_proof(); + + // check current authority set + assert_eq!(0, auth_set.id); + assert_eq!(2, auth_set.len); + let want: H256 = + hex!("176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96").into(); + assert_eq!(want, auth_set.root); + + // next authority set should have same validators but different id + assert_eq!(1, next_auth_set.id); + assert_eq!(auth_set.len, next_auth_set.len); + assert_eq!(auth_set.root, next_auth_set.root); + + let announced_set = next_auth_set; + init_block(1); + let auth_set = BeefyMmr::authority_set_proof(); + let next_auth_set = BeefyMmr::next_authority_set_proof(); + + // check new auth are expected ones + assert_eq!(announced_set, auth_set); + assert_eq!(1, auth_set.id); + // check next auth set + assert_eq!(2, next_auth_set.id); + let want: H256 = + hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); + assert_eq!(2, next_auth_set.len); + assert_eq!(want, next_auth_set.root); + + let announced_set = next_auth_set; + init_block(2); + let auth_set = BeefyMmr::authority_set_proof(); + let next_auth_set = BeefyMmr::next_authority_set_proof(); + + // check new auth are expected ones + assert_eq!(announced_set, auth_set); + assert_eq!(2, auth_set.id); + // check next auth set + assert_eq!(3, next_auth_set.id); + let want: H256 = + hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); + assert_eq!(2, next_auth_set.len); + assert_eq!(want, next_auth_set.root); + }); +} diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index a0cba2270b37d..fce531d3f5dd7 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -32,7 +32,9 @@ use sp_runtime::{ }; use sp_std::prelude::*; -use beefy_primitives::{AuthorityIndex, ConsensusLog, ValidatorSet, BEEFY_ENGINE_ID}; +use beefy_primitives::{ + AuthorityIndex, ConsensusLog, OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, +}; #[cfg(test)] mod mock; @@ -58,6 +60,13 @@ pub mod pallet { /// The maximum number of authorities that can be added. 
type MaxAuthorities: Get; + + /// A hook to act on the new BEEFY validator set. + /// + /// For some applications it might be beneficial to make the BEEFY validator set available + /// externally apart from having it in the storage. For instance you might cache a light + /// weight MMR root over validators and make it available for Light Clients. + type OnNewValidatorSet: OnNewValidatorSet<::BeefyId>; } #[pallet::pallet] @@ -118,20 +127,29 @@ impl Pallet { ) { >::put(&new); - let next_id = Self::validator_set_id() + 1u64; - >::put(next_id); - if let Some(validator_set) = ValidatorSet::::new(new, next_id) { + let new_id = Self::validator_set_id() + 1u64; + >::put(new_id); + + >::put(&queued); + + if let Some(validator_set) = ValidatorSet::::new(new, new_id) { let log = DigestItem::Consensus( BEEFY_ENGINE_ID, - ConsensusLog::AuthoritiesChange(validator_set).encode(), + ConsensusLog::AuthoritiesChange(validator_set.clone()).encode(), ); >::deposit_log(log); - } - >::put(&queued); + let next_id = new_id + 1; + if let Some(next_validator_set) = ValidatorSet::::new(queued, next_id) { + >::on_new_validator_set( + &validator_set, + &next_validator_set, + ); + } + } } - fn initialize_authorities(authorities: &[T::BeefyId]) -> Result<(), ()> { + fn initialize_authorities(authorities: &Vec) -> Result<(), ()> { if authorities.is_empty() { return Ok(()) } @@ -141,12 +159,25 @@ impl Pallet { } let bounded_authorities = - BoundedSlice::::try_from(authorities)?; + BoundedSlice::::try_from(authorities.as_slice())?; + let id = 0; >::put(bounded_authorities); - >::put(0); + >::put(id); // Like `pallet_session`, initialize the next validator set as well. >::put(bounded_authorities); + + if let Some(validator_set) = ValidatorSet::::new(authorities.clone(), id) { + let next_id = id + 1; + if let Some(next_validator_set) = + ValidatorSet::::new(authorities.clone(), next_id) + { + >::on_new_validator_set( + &validator_set, + &next_validator_set, + ); + } + } Ok(()) } } diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs index 27796b5b2206c..3bb59c7c39485 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -87,6 +87,7 @@ impl frame_system::Config for Test { impl pallet_beefy::Config for Test { type BeefyId = BeefyId; type MaxAuthorities = ConstU32<100>; + type OnNewValidatorSet = (); } parameter_types! { diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs index 8dbdd66f3559b..87f1b8756af65 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -154,6 +154,20 @@ pub struct VoteMessage { pub signature: Signature, } +/// New BEEFY validator set notification hook. +pub trait OnNewValidatorSet { + /// Function called by the pallet when BEEFY validator set changes. + fn on_new_validator_set( + validator_set: &ValidatorSet, + next_validator_set: &ValidatorSet, + ); +} + +/// No-op implementation of [OnNewValidatorSet]. +impl OnNewValidatorSet for () { + fn on_new_validator_set(_: &ValidatorSet, _: &ValidatorSet) {} +} + sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. pub trait BeefyApi diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 426a1ba5ff80b..761eee9f8ef85 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -95,10 +95,10 @@ impl MmrLeafVersion { } } -/// Details of the next BEEFY authority set. +/// Details of a BEEFY authority set. 
#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct BeefyNextAuthoritySet { - /// Id of the next set. +pub struct BeefyAuthoritySet { + /// Id of the set. /// /// Id is required to correlate BEEFY signed commitments with the validator set. /// Light Client can easily verify that the commitment witness it is getting is @@ -106,11 +106,11 @@ pub struct BeefyNextAuthoritySet { pub id: crate::ValidatorSetId, /// Number of validators in the set. /// - /// Some BEEFY Light Clients may use an interactive protocol to verify only subset + /// Some BEEFY Light Clients may use an interactive protocol to verify only a subset /// of signatures. We put set length here, so that these clients can verify the minimal /// number of required signatures. pub len: u32, - /// Merkle Root Hash build from BEEFY AuthorityIds. + /// Merkle Root Hash built from BEEFY AuthorityIds. /// /// This is used by Light Clients to confirm that the commitments are signed by the correct /// validator set. Light Clients using interactive protocol, might verify only subset of @@ -118,6 +118,9 @@ pub struct BeefyNextAuthoritySet { pub root: MerkleRoot, } +/// Details of the next BEEFY authority set. +pub type BeefyNextAuthoritySet = BeefyAuthoritySet; + #[cfg(test)] mod tests { use super::*; diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 739be9ec8bdd8..1c2707b3719ad 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../primitives/beefy" } +beefy-merkle-tree = { version = "4.0.0-dev", default-features = false, path = "../../frame/beefy-mmr/primitives" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } @@ -67,6 +68,7 @@ default = [ ] std = [ "beefy-primitives/std", + "beefy-merkle-tree/std", "sp-application-crypto/std", "sp-consensus-aura/std", "sp-consensus-babe/std", diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 555f29e5991f7..ea62f2ac84f3d 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -947,6 +947,16 @@ cfg_if! 
{ } } + impl beefy_merkle_tree::BeefyMmrApi for RuntimeApi { + fn authority_set_proof() -> beefy_primitives::mmr::BeefyAuthoritySet { + Default::default() + } + + fn next_authority_set_proof() -> beefy_primitives::mmr::BeefyNextAuthoritySet { + Default::default() + } + } + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(_account: AccountId) -> Index { 0 From ee3eb220974f55146cf238f28ec2b1ec61141999 Mon Sep 17 00:00:00 2001 From: Doordashcon Date: Thu, 23 Jun 2022 05:18:40 +0100 Subject: [PATCH 012/135] make pallet-tips & pallet-bounties instantiable (#11473) * make pallet-tips & pallet-bounties instantiable * update test * add default instance * update * cargo fmt * update * update * update * update * fix merge * fix tests * bounties benchmarking instantiable * fix benchmarks * make tips benchmarks instantible Co-authored-by: Shawn Tabrizi --- frame/bounties/src/benchmarking.rs | 156 ++++++++++++----------- frame/bounties/src/lib.rs | 193 +++++++++++++++-------------- frame/bounties/src/tests.rs | 89 +++++++++++-- frame/tips/src/benchmarking.rs | 58 +++++---- frame/tips/src/lib.rs | 105 ++++++++-------- frame/tips/src/tests.rs | 87 +++++++++++-- 6 files changed, 417 insertions(+), 271 deletions(-) diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 912e461501be5..7566c32f6e9a1 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -31,25 +31,25 @@ use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; // Create bounties that are approved for use in `on_initialize`. -fn create_approved_bounties(n: u32) -> Result<(), &'static str> { +fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), &'static str> { for i in 0..n { let (caller, _curator, _fee, value, reason) = - setup_bounty::(i, T::MaximumReasonLength::get()); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + setup_bounty::(i, T::MaximumReasonLength::get()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; } - ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); + ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) } // Create the pre-requisite information needed to create a treasury `propose_bounty`. 
-fn setup_bounty( +fn setup_bounty, I: 'static>( u: u32, d: u32, -) -> (T::AccountId, T::AccountId, BalanceOf, BalanceOf, Vec) { +) -> (T::AccountId, T::AccountId, BalanceOf, BalanceOf, Vec) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); + let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); @@ -60,97 +60,103 @@ fn setup_bounty( (caller, curator, fee, value, reason) } -fn create_bounty( +fn create_bounty, I: 'static>( ) -> Result<(::Source, BountyIndex), &'static str> { - let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); + let (caller, curator, fee, value, reason) = + setup_bounty::(0, T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator.clone()); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; - Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator( + RawOrigin::Root.into(), + bounty_id, + curator_lookup.clone(), + fee, + )?; + Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) } -fn setup_pot_account() { - let pot_account = Bounties::::account_id(); +fn setup_pot_account, I: 'static>() { + let pot_account = Bounties::::account_id(); let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -benchmarks! { +benchmarks_instance_pallet! { propose_bounty { let d in 0 .. 
T::MaximumReasonLength::get(); - let (caller, curator, fee, value, description) = setup_bounty::(0, d); + let (caller, curator, fee, value, description) = setup_bounty::(0, d); }: _(RawOrigin::Signed(caller), value, description) approve_bounty { - let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; + let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; }: _(RawOrigin::Root, bounty_id) propose_curator { - setup_pot_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Bounties::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { - setup_pot_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::::get() - 1; + frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 2u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_id) accept_curator { - setup_pot_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator.clone()); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Bounties::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { - setup_pot_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Bounties::::on_initialize(T::BlockNumber::zero()); + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + 
Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) claim_bounty { - setup_pot_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Bounties::::on_initialize(T::BlockNumber::zero()); + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); - Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; + Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get()); + frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get() + 1u32.into()); ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); }: _(RawOrigin::Signed(curator), bounty_id) @@ -159,45 +165,45 @@ benchmarks! { } close_bounty_proposed { - setup_pot_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); - Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) close_bounty_active { - setup_pot_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) verify { - assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) + assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } extend_bounty_expiry { - setup_pot_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Bounties::::on_initialize(T::BlockNumber::zero()); + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) verify { - assert_last_event::(Event::BountyExtended { index: bounty_id }.into()) + assert_last_event::(Event::BountyExtended { index: bounty_id }.into()) } spend_funds { let b in 1 .. 
		let b in 1 .. 100;
-		setup_pot_account::<T>();
-		create_approved_bounties::<T>(b)?;
+		setup_pot_account::<T, I>();
+		create_approved_bounties::<T, I>(b)?;

-		let mut budget_remaining = BalanceOf::<T>::max_value();
-		let mut imbalance = PositiveImbalanceOf::<T>::zero();
+		let mut budget_remaining = BalanceOf::<T, I>::max_value();
+		let mut imbalance = PositiveImbalanceOf::<T, I>::zero();
 		let mut total_weight = Weight::zero();
 		let mut missed_any = false;
	}: {
-		<Bounties<T> as pallet_treasury::SpendFunds<T>>::spend_funds(
+		<Bounties<T, I> as pallet_treasury::SpendFunds<T, I>>::spend_funds(
 			&mut budget_remaining,
 			&mut imbalance,
 			&mut total_weight,
@@ -205,9 +211,9 @@ benchmarks! {
 		);
	}
	verify {
-		ensure!(budget_remaining < BalanceOf::<T>::max_value(), "Budget not used");
+		ensure!(budget_remaining < BalanceOf::<T, I>::max_value(), "Budget not used");
 		ensure!(missed_any == false, "Missed some");
-		assert_last_event::<T>(Event::BountyBecameActive { index: b - 1 }.into())
+		assert_last_event::<T, I>(Event::BountyBecameActive { index: b - 1 }.into())
	}

	impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test)
diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs
index b01a36b430a4e..8e28cacb9d929 100644
--- a/frame/bounties/src/lib.rs
+++ b/frame/bounties/src/lib.rs
@@ -107,9 +107,9 @@ pub use weights::WeightInfo;

 pub use pallet::*;

-type BalanceOf<T> = pallet_treasury::BalanceOf<T>;
+type BalanceOf<T, I = ()> = pallet_treasury::BalanceOf<T, I>;

-type PositiveImbalanceOf<T> = pallet_treasury::PositiveImbalanceOf<T>;
+type PositiveImbalanceOf<T, I = ()> = pallet_treasury::PositiveImbalanceOf<T, I>;

 /// An index of a bounty. Just a `u32`.
 pub type BountyIndex = u32;
@@ -188,13 +188,13 @@ pub mod pallet {

 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
-	pub struct Pallet<T>(_);
+	pub struct Pallet<T, I = ()>(_);

 	#[pallet::config]
-	pub trait Config: frame_system::Config + pallet_treasury::Config {
+	pub trait Config<I: 'static = ()>: frame_system::Config + pallet_treasury::Config<I> {
 		/// The amount held on deposit for placing a bounty proposal.
 		#[pallet::constant]
-		type BountyDepositBase: Get<BalanceOf<Self>>;
+		type BountyDepositBase: Get<BalanceOf<Self, I>>;

 		/// The delay period for which a bounty beneficiary need to wait before claim the payout.
 		#[pallet::constant]
@@ -213,22 +213,22 @@ pub mod pallet {

 		/// Maximum amount of funds that should be placed in a deposit for making a proposal.
 		#[pallet::constant]
-		type CuratorDepositMax: Get<Option<BalanceOf<Self>>>;
+		type CuratorDepositMax: Get<Option<BalanceOf<Self, I>>>;

 		/// Minimum amount of funds that should be placed in a deposit for making a proposal.
 		#[pallet::constant]
-		type CuratorDepositMin: Get<Option<BalanceOf<Self>>>;
+		type CuratorDepositMin: Get<Option<BalanceOf<Self, I>>>;

 		/// Minimum value for a bounty.
 		#[pallet::constant]
-		type BountyValueMinimum: Get<BalanceOf<Self>>;
+		type BountyValueMinimum: Get<BalanceOf<Self, I>>;

 		/// The amount held on deposit per byte within the tip report reason or bounty description.
 		#[pallet::constant]
-		type DataDepositPerByte: Get<BalanceOf<Self>>;
+		type DataDepositPerByte: Get<BalanceOf<Self, I>>;

 		/// The overarching event type.
-		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;

 		/// Maximum acceptable reason length.
 		///
@@ -240,11 +240,11 @@ pub mod pallet {
 		type WeightInfo: WeightInfo;

 		/// The child bounty manager.
-		type ChildBountyManager: ChildBountyManager<BalanceOf<Self>>;
+		type ChildBountyManager: ChildBountyManager<BalanceOf<Self, I>>;
	}

 	#[pallet::error]
-	pub enum Error<T> {
+	pub enum Error<T, I = ()> {
 		/// Proposer's balance is too low.
 		InsufficientProposersBalance,
 		/// No proposal or bounty at that index.
@@ -272,17 +272,17 @@ pub mod pallet {

 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
-	pub enum Event<T: Config> {
+	pub enum Event<T: Config<I>, I: 'static = ()> {
 		/// New bounty proposal.
 		BountyProposed { index: BountyIndex },
 		/// A bounty proposal was rejected; funds were slashed.
-		BountyRejected { index: BountyIndex, bond: BalanceOf<T> },
+		BountyRejected { index: BountyIndex, bond: BalanceOf<T, I> },
 		/// A bounty proposal is funded and became active.
 		BountyBecameActive { index: BountyIndex },
 		/// A bounty is awarded to a beneficiary.
 		BountyAwarded { index: BountyIndex, beneficiary: T::AccountId },
 		/// A bounty is claimed by beneficiary.
-		BountyClaimed { index: BountyIndex, payout: BalanceOf<T>, beneficiary: T::AccountId },
+		BountyClaimed { index: BountyIndex, payout: BalanceOf<T, I>, beneficiary: T::AccountId },
 		/// A bounty is cancelled.
 		BountyCanceled { index: BountyIndex },
 		/// A bounty expiry is extended.
@@ -292,32 +292,32 @@ pub mod pallet {
 	/// Number of bounty proposals that have been made.
 	#[pallet::storage]
 	#[pallet::getter(fn bounty_count)]
-	pub type BountyCount<T: Config> = StorageValue<_, BountyIndex, ValueQuery>;
+	pub type BountyCount<T: Config<I>, I: 'static = ()> = StorageValue<_, BountyIndex, ValueQuery>;

 	/// Bounties that have been made.
 	#[pallet::storage]
 	#[pallet::getter(fn bounties)]
-	pub type Bounties<T: Config> = StorageMap<
+	pub type Bounties<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Twox64Concat,
 		BountyIndex,
-		Bounty<T::AccountId, BalanceOf<T>, T::BlockNumber>,
+		Bounty<T::AccountId, BalanceOf<T, I>, T::BlockNumber>,
 	>;

 	/// The description of each bounty.
 	#[pallet::storage]
 	#[pallet::getter(fn bounty_descriptions)]
-	pub type BountyDescriptions<T: Config> =
+	pub type BountyDescriptions<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, BountyIndex, BoundedVec<u8, T::MaximumReasonLength>>;

 	/// Bounty indices that have been approved but not yet funded.
 	#[pallet::storage]
 	#[pallet::getter(fn bounty_approvals)]
-	pub type BountyApprovals<T: Config> =
+	pub type BountyApprovals<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, BoundedVec<BountyIndex, T::MaxApprovals>, ValueQuery>;

 	#[pallet::call]
-	impl<T: Config> Pallet<T> {
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Propose a new bounty.
 		///
 		/// The dispatch origin for this call must be _Signed_.
@@ -330,10 +330,10 @@ pub mod pallet {
 		/// - `fee`: The curator fee.
 		/// - `value`: The total payment amount of this bounty, curator fee included.
 		/// - `description`: The description of this bounty.
-		#[pallet::weight(<T as Config>::WeightInfo::propose_bounty(description.len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::propose_bounty(description.len() as u32))]
 		pub fn propose_bounty(
 			origin: OriginFor<T>,
-			#[pallet::compact] value: BalanceOf<T>,
+			#[pallet::compact] value: BalanceOf<T, I>,
 			description: Vec<u8>,
 		) -> DispatchResult {
 			let proposer = ensure_signed(origin)?;
@@ -349,21 +349,21 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::approve_bounty())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::approve_bounty())]
 		pub fn approve_bounty(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 		) -> DispatchResult {
 			T::ApproveOrigin::ensure_origin(origin)?;

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
-				ensure!(bounty.status == BountyStatus::Proposed, Error::<T>::UnexpectedStatus);
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;
+				ensure!(bounty.status == BountyStatus::Proposed, Error::<T, I>::UnexpectedStatus);

 				bounty.status = BountyStatus::Approved;
-				BountyApprovals::<T>::try_append(bounty_id)
-					.map_err(|()| Error::<T>::TooManyQueued)?;
+				BountyApprovals::<T, I>::try_append(bounty_id)
+					.map_err(|()| Error::<T, I>::TooManyQueued)?;

 				Ok(())
 			})?;
@@ -377,24 +377,24 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::propose_curator())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::propose_curator())]
 		pub fn propose_curator(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 			curator: <T::Lookup as StaticLookup>::Source,
-			#[pallet::compact] fee: BalanceOf<T>,
+			#[pallet::compact] fee: BalanceOf<T, I>,
 		) -> DispatchResult {
 			T::ApproveOrigin::ensure_origin(origin)?;

 			let curator = T::Lookup::lookup(curator)?;
-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;
 				match bounty.status {
 					BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {},
-					_ => return Err(Error::<T>::UnexpectedStatus.into()),
+					_ => return Err(Error::<T, I>::UnexpectedStatus.into()),
 				};

-				ensure!(fee < bounty.value, Error::<T>::InvalidFee);
+				ensure!(fee < bounty.value, Error::<T, I>::InvalidFee);

 				bounty.status = BountyStatus::CuratorProposed { curator };
 				bounty.fee = fee;
@@ -422,7 +422,7 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::unassign_curator())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::unassign_curator())]
 		pub fn unassign_curator(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
@@ -431,10 +431,11 @@ pub mod pallet {
 			let maybe_sender = ensure_signed(origin.clone())
 				.map(Some)
 				.or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?;

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;

-				let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf<T>| {
+				let slash_curator = |curator: &T::AccountId,
+				                     curator_deposit: &mut BalanceOf<T, I>| {
 					let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0;
 					T::OnSlash::on_unbalanced(imbalance);
 					*curator_deposit = Zero::zero();
@@ -443,7 +444,7 @@ pub mod pallet {
 				match bounty.status {
 					BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {
 						// No curator to unassign at this point.
-						return Err(Error::<T>::UnexpectedStatus.into())
+						return Err(Error::<T, I>::UnexpectedStatus.into())
 					},
 					BountyStatus::CuratorProposed { ref curator } => {
 						// A curator has been proposed, but not accepted yet.
@@ -468,7 +469,7 @@ pub mod pallet {
 							// Continue to change bounty status below...
 						} else {
 							// Curator has more time to give an update.
-							return Err(Error::<T>::Premature.into())
+							return Err(Error::<T, I>::Premature.into())
 						}
 					} else {
 						// Else this is the curator, willingly giving up their role.
@@ -506,19 +507,19 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::accept_curator())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::accept_curator())]
 		pub fn accept_curator(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 		) -> DispatchResult {
 			let signer = ensure_signed(origin)?;

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;

 				match bounty.status {
 					BountyStatus::CuratorProposed { ref curator } => {
-						ensure!(signer == *curator, Error::<T>::RequireCurator);
+						ensure!(signer == *curator, Error::<T, I>::RequireCurator);

 						let deposit = Self::calculate_curator_deposit(&bounty.fee);
 						T::Currency::reserve(curator, deposit)?;
@@ -531,7 +532,7 @@ pub mod pallet {

 						Ok(())
 					},
-					_ => Err(Error::<T>::UnexpectedStatus.into()),
+					_ => Err(Error::<T, I>::UnexpectedStatus.into()),
 				}
 			})?;
 			Ok(())
@@ -548,7 +549,7 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::award_bounty())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::award_bounty())]
 		pub fn award_bounty(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 			beneficiary: <T::Lookup as StaticLookup>::Source,
 		) -> DispatchResult {
 			let signer = ensure_signed(origin)?;
 			let beneficiary = T::Lookup::lookup(beneficiary)?;

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;

 				// Ensure no active child bounties before processing the call.
 				ensure!(
 					T::ChildBountyManager::child_bounties_count(bounty_id) == 0,
-					Error::<T>::HasActiveChildBounty
+					Error::<T, I>::HasActiveChildBounty
 				);

 				match &bounty.status {
 					BountyStatus::Active { curator, .. } => {
-						ensure!(signer == *curator, Error::<T>::RequireCurator);
+						ensure!(signer == *curator, Error::<T, I>::RequireCurator);
 					},
-					_ => return Err(Error::<T>::UnexpectedStatus.into()),
+					_ => return Err(Error::<T, I>::UnexpectedStatus.into()),
 				}
 				bounty.status = BountyStatus::PendingPayout {
 					curator: signer,
@@ -582,7 +583,7 @@ pub mod pallet {
 				Ok(())
 			})?;

-			Self::deposit_event(Event::<T>::BountyAwarded { index: bounty_id, beneficiary });
+			Self::deposit_event(Event::<T, I>::BountyAwarded { index: bounty_id, beneficiary });
 			Ok(())
 		}

@@ -595,21 +596,21 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::claim_bounty())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::claim_bounty())]
 		pub fn claim_bounty(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 		) -> DispatchResult {
 			let _ = ensure_signed(origin)?; // anyone can trigger claim

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let bounty = maybe_bounty.take().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let bounty = maybe_bounty.take().ok_or(Error::<T, I>::InvalidIndex)?;
 				if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } =
 					bounty.status
 				{
 					ensure!(
 						frame_system::Pallet::<T>::block_number() >= unlock_at,
-						Error::<T>::Premature
+						Error::<T, I>::Premature
 					);
 					let bounty_account = Self::bounty_account_id(bounty_id);
 					let balance = T::Currency::free_balance(&bounty_account);
@@ -633,16 +634,16 @@ pub mod pallet {

 					*maybe_bounty = None;

-					BountyDescriptions::<T>::remove(bounty_id);
+					BountyDescriptions::<T, I>::remove(bounty_id);

-					Self::deposit_event(Event::<T>::BountyClaimed {
+					Self::deposit_event(Event::<T, I>::BountyClaimed {
 						index: bounty_id,
 						payout,
 						beneficiary,
 					});
 					Ok(())
 				} else {
-					Err(Error::<T>::UnexpectedStatus.into())
+					Err(Error::<T, I>::UnexpectedStatus.into())
 				}
 			})?;
 			Ok(())
@@ -658,47 +659,47 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::close_bounty_proposed()
-			.max(<T as Config>::WeightInfo::close_bounty_active()))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::close_bounty_proposed()
+			.max(<T as Config<I>>::WeightInfo::close_bounty_active()))]
 		pub fn close_bounty(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 		) -> DispatchResultWithPostInfo {
 			T::RejectOrigin::ensure_origin(origin)?;

-			Bounties::<T>::try_mutate_exists(
+			Bounties::<T, I>::try_mutate_exists(
 				bounty_id,
 				|maybe_bounty| -> DispatchResultWithPostInfo {
-					let bounty = maybe_bounty.as_ref().ok_or(Error::<T>::InvalidIndex)?;
+					let bounty = maybe_bounty.as_ref().ok_or(Error::<T, I>::InvalidIndex)?;

 					// Ensure no active child bounties before processing the call.
 					ensure!(
 						T::ChildBountyManager::child_bounties_count(bounty_id) == 0,
-						Error::<T>::HasActiveChildBounty
+						Error::<T, I>::HasActiveChildBounty
 					);

 					match &bounty.status {
 						BountyStatus::Proposed => {
 							// The reject origin would like to cancel a proposed bounty.
-							BountyDescriptions::<T>::remove(bounty_id);
+							BountyDescriptions::<T, I>::remove(bounty_id);
 							let value = bounty.bond;
 							let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0;
 							T::OnSlash::on_unbalanced(imbalance);
 							*maybe_bounty = None;

-							Self::deposit_event(Event::<T>::BountyRejected {
+							Self::deposit_event(Event::<T, I>::BountyRejected {
 								index: bounty_id,
 								bond: value,
 							});
 							// Return early, nothing else to do.
 							return Ok(
-								Some(<T as Config>::WeightInfo::close_bounty_proposed()).into()
+								Some(<T as Config<I>>::WeightInfo::close_bounty_proposed()).into()
 							)
 						},
 						BountyStatus::Approved => {
 							// For weight reasons, we don't allow a council to cancel in this phase.
 							// We ask for them to wait until it is funded before they can cancel.
-							return Err(Error::<T>::UnexpectedStatus.into())
+							return Err(Error::<T, I>::UnexpectedStatus.into())
 						},
 						BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => {
 							// Nothing extra to do besides the removal of the bounty below.
@@ -715,13 +716,13 @@ pub mod pallet {
 							// this bounty, it should mean the curator was acting maliciously.
 							// So the council should first unassign the curator, slashing their
 							// deposit.
-							return Err(Error::<T>::PendingPayout.into())
+							return Err(Error::<T, I>::PendingPayout.into())
 						},
 					}

 					let bounty_account = Self::bounty_account_id(bounty_id);
-					BountyDescriptions::<T>::remove(bounty_id);
+					BountyDescriptions::<T, I>::remove(bounty_id);

 					let balance = T::Currency::free_balance(&bounty_account);
 					let res = T::Currency::transfer(
@@ -733,8 +734,8 @@ pub mod pallet {
 					debug_assert!(res.is_ok());
 					*maybe_bounty = None;

-					Self::deposit_event(Event::<T>::BountyCanceled { index: bounty_id });
-					Ok(Some(<T as Config>::WeightInfo::close_bounty_active()).into())
+					Self::deposit_event(Event::<T, I>::BountyCanceled { index: bounty_id });
+					Ok(Some(<T as Config<I>>::WeightInfo::close_bounty_active()).into())
 				},
 			)
 		}
@@ -749,7 +750,7 @@ pub mod pallet {
 		/// # <weight>
 		/// - O(1).
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::extend_bounty_expiry())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::extend_bounty_expiry())]
 		pub fn extend_bounty_expiry(
 			origin: OriginFor<T>,
 			#[pallet::compact] bounty_id: BountyIndex,
 			_remark: Vec<u8>,
 		) -> DispatchResult {
 			let signer = ensure_signed(origin)?;

-			Bounties::<T>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let bounty = maybe_bounty.as_mut().ok_or(Error::<T>::InvalidIndex)?;
+			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
+				let bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;

 				match bounty.status {
 					BountyStatus::Active { ref curator, ref mut update_due } => {
-						ensure!(*curator == signer, Error::<T>::RequireCurator);
+						ensure!(*curator == signer, Error::<T, I>::RequireCurator);
 						*update_due = (frame_system::Pallet::<T>::block_number() +
 							T::BountyUpdatePeriod::get())
 						.max(*update_due);
 					},
-					_ => return Err(Error::<T>::UnexpectedStatus.into()),
+					_ => return Err(Error::<T, I>::UnexpectedStatus.into()),
 				}

 				Ok(())
 			})?;

-			Self::deposit_event(Event::<T>::BountyExtended { index: bounty_id });
+			Self::deposit_event(Event::<T, I>::BountyExtended { index: bounty_id });
 			Ok(())
 		}
	}
}

-impl<T: Config> Pallet<T> {
-	pub fn calculate_curator_deposit(fee: &BalanceOf<T>) -> BalanceOf<T> {
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
+	pub fn calculate_curator_deposit(fee: &BalanceOf<T, I>) -> BalanceOf<T, I> {
 		let mut deposit = T::CuratorDepositMultiplier::get() * *fee;

 		if let Some(max_deposit) = T::CuratorDepositMax::get() {
@@ -812,11 +813,11 @@ impl<T: Config> Pallet<T> {
	fn create_bounty(
 		proposer: T::AccountId,
 		description: Vec<u8>,
-		value: BalanceOf<T>,
+		value: BalanceOf<T, I>,
	) -> DispatchResult {
 		let bounded_description: BoundedVec<_, _> =
-			description.try_into().map_err(|()| Error::<T>::ReasonTooBig)?;
-		ensure!(value >= T::BountyValueMinimum::get(), Error::<T>::InvalidValue);
+			description.try_into().map_err(|()| Error::<T, I>::ReasonTooBig)?;
+		ensure!(value >= T::BountyValueMinimum::get(), Error::<T, I>::InvalidValue);

 		let index = Self::bounty_count();

 		// reserve deposit for new bounty
 		let bond = T::BountyDepositBase::get() +
 			T::DataDepositPerByte::get() * (bounded_description.len() as u32).into();
 		T::Currency::reserve(&proposer, bond)
-			.map_err(|_| Error::<T>::InsufficientProposersBalance)?;
+			.map_err(|_| Error::<T, I>::InsufficientProposersBalance)?;

-		BountyCount::<T>::put(index + 1);
+		BountyCount::<T, I>::put(index + 1);

 		let bounty = Bounty {
 			proposer,
@@ -837,26 +838,26 @@ impl<T: Config> Pallet<T> {
 			status: BountyStatus::Proposed,
 		};

-		Bounties::<T>::insert(index, &bounty);
-		BountyDescriptions::<T>::insert(index, bounded_description);
+		Bounties::<T, I>::insert(index, &bounty);
+		BountyDescriptions::<T, I>::insert(index, bounded_description);

-		Self::deposit_event(Event::<T>::BountyProposed { index });
+		Self::deposit_event(Event::<T, I>::BountyProposed { index });

 		Ok(())
	}
}

-impl<T: Config> pallet_treasury::SpendFunds<T> for Pallet<T> {
+impl<T: Config<I>, I: 'static> pallet_treasury::SpendFunds<T, I> for Pallet<T, I> {
	fn spend_funds(
-		budget_remaining: &mut BalanceOf<T>,
-		imbalance: &mut PositiveImbalanceOf<T>,
+		budget_remaining: &mut BalanceOf<T, I>,
+		imbalance: &mut PositiveImbalanceOf<T, I>,
 		total_weight: &mut Weight,
 		missed_any: &mut bool,
	) {
-		let bounties_len = BountyApprovals::<T>::mutate(|v| {
+		let bounties_len = BountyApprovals::<T, I>::mutate(|v| {
 			let bounties_approval_len = v.len() as u32;
 			v.retain(|&index| {
-				Bounties::<T>::mutate(index, |bounty| {
+				Bounties::<T, I>::mutate(index, |bounty| {
 					// Should always be true, but shouldn't panic if false or we're screwed.
 					if let Some(bounty) = bounty {
 						if bounty.value <= *budget_remaining {
@@ -874,7 +875,7 @@ impl<T: Config> pallet_treasury::SpendFunds<T> for Pallet<T> {
 								bounty.value,
 							));

-							Self::deposit_event(Event::<T>::BountyBecameActive { index });
+							Self::deposit_event(Event::<T, I>::BountyBecameActive { index });
 							false
 						} else {
 							*missed_any = true;
@@ -888,7 +889,7 @@ impl<T: Config> pallet_treasury::SpendFunds<T> for Pallet<T> {
 			bounties_approval_len
 		});

-		*total_weight += <T as Config>::WeightInfo::spend_funds(bounties_len);
+		*total_weight += <T as Config<I>>::WeightInfo::spend_funds(bounties_len);
	}
}
diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs
index 0904e3a2901bb..ff220600794d4 100644
--- a/frame/bounties/src/tests.rs
+++ b/frame/bounties/src/tests.rs
@@ -35,7 +35,7 @@ use sp_core::H256;
 use sp_runtime::{
 	testing::Header,
 	traits::{BadOrigin, BlakeTwo256, IdentityLookup},
-	Perbill, Storage,
+	BuildStorage, Perbill, Storage,
 };

 use super::Event as BountiesEvent;
@@ -52,7 +52,9 @@ frame_support::construct_runtime!(
 		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
 		Bounties: pallet_bounties::{Pallet, Call, Storage, Event<T>},
+		Bounties1: pallet_bounties::<Instance1>::{Pallet, Call, Storage, Event<T>},
 		Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event<T>},
+		Treasury1: pallet_treasury::<Instance1>::{Pallet, Call, Storage, Config, Event<T>},
	}
);
@@ -105,8 +107,9 @@ thread_local! {
}
parameter_types! {
	pub const ProposalBond: Permill = Permill::from_percent(5);
-	pub const Burn: Permill = Permill::from_percent(50);
+	pub static Burn: Permill = Permill::from_percent(50);
	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2");
}

impl pallet_treasury::Config for Test {
@@ -128,6 +131,25 @@ impl pallet_treasury::Config for Test {
	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
}

+impl pallet_treasury::Config<Instance1> for Test {
+	type PalletId = TreasuryPalletId2;
+	type Currency = pallet_balances::Pallet<Test>;
+	type ApproveOrigin = frame_system::EnsureRoot<u128>;
+	type RejectOrigin = frame_system::EnsureRoot<u128>;
+	type Event = Event;
+	type OnSlash = ();
+	type ProposalBond = ProposalBond;
+	type ProposalBondMinimum = ConstU64<1>;
+	type ProposalBondMaximum = ();
+	type SpendPeriod = ConstU64<2>;
+	type Burn = Burn;
+	type BurnDestination = (); // Just gets burned.
+	type WeightInfo = ();
+	type SpendFunds = Bounties1;
+	type MaxApprovals = ConstU32<100>;
+	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
+}
+
parameter_types! {
	// This will be 50% of the bounty fee.
	pub const CuratorDepositMultiplier: Permill = Permill::from_percent(50);
@@ -151,18 +173,35 @@ impl Config for Test {
	type ChildBountyManager = ();
}

+impl Config<Instance1> for Test {
+	type Event = Event;
+	type BountyDepositBase = ConstU64<80>;
+	type BountyDepositPayoutDelay = ConstU64<3>;
+	type BountyUpdatePeriod = ConstU64<20>;
+	type CuratorDepositMultiplier = CuratorDepositMultiplier;
+	type CuratorDepositMax = CuratorDepositMax;
+	type CuratorDepositMin = CuratorDepositMin;
+	type BountyValueMinimum = ConstU64<1>;
+	type DataDepositPerByte = ConstU64<1>;
+	type MaximumReasonLength = ConstU32<16384>;
+	type WeightInfo = ();
+	type ChildBountyManager = ();
+}
+
type TreasuryError = pallet_treasury::Error<Test>;

pub fn new_test_ext() -> sp_io::TestExternalities {
-	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	pallet_balances::GenesisConfig::<Test> {
-		// Total issuance will be 200 with treasury account initialized at ED.
-		balances: vec![(0, 100), (1, 98), (2, 1)],
+	let mut ext: sp_io::TestExternalities = GenesisConfig {
+		system: frame_system::GenesisConfig::default(),
+		balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] },
+		treasury: Default::default(),
+		treasury_1: Default::default(),
	}
-	.assimilate_storage(&mut t)
-	.unwrap();
-	GenesisBuild::<Test>::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap();
-	t.into()
+	.build_storage()
+	.unwrap()
+	.into();
+	ext.execute_with(|| System::set_block_number(1));
+	ext
}

fn last_event() -> BountiesEvent<Test> {
@@ -276,7 +315,7 @@ fn reject_non_existent_spend_proposal_fails() {
	new_test_ext().execute_with(|| {
		assert_noop!(
			Treasury::reject_proposal(Origin::root(), 0),
-			pallet_treasury::Error::<Test>::InvalidIndex
+			pallet_treasury::Error::<Test, _>::InvalidIndex
		);
	});
}
@@ -469,7 +508,7 @@ fn close_bounty_works() {
		assert_eq!(Balances::free_balance(0), 100 - deposit);

		assert_eq!(Bounties::bounties(0), None);
-		assert!(!pallet_treasury::Proposals::<Test>::contains_key(0));
+		assert!(!pallet_treasury::Proposals::<Test, _>::contains_key(0));
		assert_eq!(Bounties::bounty_descriptions(0), None);
	});
@@ -1122,3 +1161,29 @@ fn accept_curator_handles_different_deposit_calculations() {
		assert_eq!(Balances::reserved_balance(&user), expected_deposit);
	});
}
+
+#[test]
+fn approve_bounty_works_second_instance() {
+	new_test_ext().execute_with(|| {
+		// Set burn to 0 to make tracking funds easier.
+		Burn::set(Permill::from_percent(0));
+
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		Balances::make_free_balance_be(&Treasury1::account_id(), 201);
+		assert_eq!(Balances::free_balance(&Treasury::account_id()), 101);
+		assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201);
+
+		assert_ok!(Bounties1::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+		assert_ok!(Bounties1::approve_bounty(Origin::root(), 0));
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		<Treasury1 as OnInitialize<u64>>::on_initialize(2);
+
+		// Bounties 1 is funded... but from where?
+		assert_eq!(Balances::free_balance(Bounties1::bounty_account_id(0)), 50);
+		// Treasury 1 unchanged
+		assert_eq!(Balances::free_balance(&Treasury::account_id()), 101);
+		// Treasury 2 has funds removed
+		assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201 - 50);
+	});
+}
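The second-instance test above works because every treasury instance derives its pot account
from its own `PalletId` ("py/trsry" vs "py/trsr2" in this mock), so the instances can never
spend each other's funds. A minimal sketch of that derivation, assuming only `frame-support`'s
`PalletId` and `sp-runtime`'s `AccountIdConversion` trait (the function name `pot_accounts` is
illustrative, not part of the pallet):

    use frame_support::PalletId;
    use sp_runtime::{traits::AccountIdConversion, AccountId32};

    // Each PalletId maps deterministically to its own account; the single
    // differing byte ("y" vs "2") is enough to keep the two pots separate.
    fn pot_accounts() -> (AccountId32, AccountId32) {
        let default_pot = PalletId(*b"py/trsry").into_account_truncating();
        let second_pot = PalletId(*b"py/trsr2").into_account_truncating();
        (default_pot, second_pot)
    }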
diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs
index 190ef60f3b810..33e455bd3b9fd 100644
--- a/frame/tips/src/benchmarking.rs
+++ b/frame/tips/src/benchmarking.rs
@@ -19,7 +19,7 @@

 #![cfg(feature = "runtime-benchmarks")]

-use frame_benchmarking::{account, benchmarks, whitelisted_caller};
+use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller};
 use frame_support::ensure;
 use frame_system::RawOrigin;
 use sp_runtime::traits::Saturating;
@@ -30,7 +30,7 @@ use crate::Pallet as TipsMod;
 const SEED: u32 = 0;

 // Create the pre-requisite information needed to create a `report_awesome`.
-fn setup_awesome<T: Config>(length: u32) -> (T::AccountId, Vec<u8>, T::AccountId) {
+fn setup_awesome<T: Config<I>, I: 'static>(length: u32) -> (T::AccountId, Vec<u8>, T::AccountId) {
 	let caller = whitelisted_caller();
 	let value = T::TipReportDepositBase::get() +
 		T::DataDepositPerByte::get() * length.into() +
@@ -42,10 +42,10 @@ fn setup_awesome<T: Config>(length: u32) -> (T::AccountId, Vec<u8>, T::AccountId) {
 }

 // Create the pre-requisite information needed to call `tip_new`.
-fn setup_tip<T: Config>(
+fn setup_tip<T: Config<I>, I: 'static>(
 	r: u32,
 	t: u32,
-) -> Result<(T::AccountId, Vec<u8>, T::AccountId, BalanceOf<T>), &'static str> {
+) -> Result<(T::AccountId, Vec<u8>, T::AccountId, BalanceOf<T, I>), &'static str> {
 	let tippers_count = T::Tippers::count();

 	for i in 0..t {
@@ -64,13 +64,17 @@ fn setup_tip<T: Config>(
 // Create `t` new tips for the tip proposal with `hash`.
 // This function automatically makes the tip able to close.
-fn create_tips<T: Config>(t: u32, hash: T::Hash, value: BalanceOf<T>) -> Result<(), &'static str> {
+fn create_tips<T: Config<I>, I: 'static>(
+	t: u32,
+	hash: T::Hash,
+	value: BalanceOf<T, I>,
+) -> Result<(), &'static str> {
 	for i in 0..t {
 		let caller = account("member", i, SEED);
 		ensure!(T::Tippers::contains(&caller), "caller is not a tipper");
-		TipsMod::<T>::tip(RawOrigin::Signed(caller).into(), hash, value)?;
+		TipsMod::<T, I>::tip(RawOrigin::Signed(caller).into(), hash, value)?;
	}
-	Tips::<T>::mutate(hash, |maybe_tip| {
+	Tips::<T, I>::mutate(hash, |maybe_tip| {
 		if let Some(open_tip) = maybe_tip {
 			open_tip.closes = Some(T::BlockNumber::zero());
 		}
@@ -78,16 +82,16 @@ fn create_tips<T: Config>(t: u32, hash: T::Hash, value: BalanceOf<T>) -> Result<(), &'static str> {
 	Ok(())
 }

-fn setup_pot_account<T: Config>() {
-	let pot_account = TipsMod::<T>::account_id();
+fn setup_pot_account<T: Config<I>, I: 'static>() {
+	let pot_account = TipsMod::<T, I>::account_id();
 	let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into());
 	let _ = T::Currency::make_free_balance_be(&pot_account, value);
 }

-benchmarks! {
+benchmarks_instance_pallet! {
	report_awesome {
 		let r in 0 .. T::MaximumReasonLength::get();
-		let (caller, reason, awesome_person) = setup_awesome::<T>(r);
+		let (caller, reason, awesome_person) = setup_awesome::<T, I>(r);
 		// Whitelist caller account from further DB operations.
 		let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
 		frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
@@ -95,8 +99,8 @@ benchmarks! {
	retract_tip {
 		let r = T::MaximumReasonLength::get();
-		let (caller, reason, awesome_person) = setup_awesome::<T>(r);
-		TipsMod::<T>::report_awesome(
+		let (caller, reason, awesome_person) = setup_awesome::<T, I>(r);
+		TipsMod::<T, I>::report_awesome(
 			RawOrigin::Signed(caller.clone()).into(),
 			reason.clone(),
 			awesome_person.clone()
@@ -112,7 +116,7 @@ benchmarks! {
 		let r in 0 .. T::MaximumReasonLength::get();
 		let t in 1 .. T::Tippers::max_len() as u32;

-		let (caller, reason, beneficiary, value) = setup_tip::<T>(r, t)?;
+		let (caller, reason, beneficiary, value) = setup_tip::<T, I>(r, t)?;
 		// Whitelist caller account from further DB operations.
 		let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
 		frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
@@ -120,9 +124,9 @@ benchmarks! {
	tip {
 		let t in 1 .. T::Tippers::max_len() as u32;
-		let (member, reason, beneficiary, value) = setup_tip::<T>(0, t)?;
+		let (member, reason, beneficiary, value) = setup_tip::<T, I>(0, t)?;
 		let value = T::Currency::minimum_balance().saturating_mul(100u32.into());
-		TipsMod::<T>::tip_new(
+		TipsMod::<T, I>::tip_new(
 			RawOrigin::Signed(member).into(),
 			reason.clone(),
 			beneficiary.clone(),
@@ -130,8 +134,8 @@ benchmarks! {
 		)?;
 		let reason_hash = T::Hashing::hash(&reason[..]);
 		let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary));
-		ensure!(Tips::<T>::contains_key(hash), "tip does not exist");
-		create_tips::<T>(t - 1, hash, value)?;
+		ensure!(Tips::<T, I>::contains_key(hash), "tip does not exist");
+		create_tips::<T, I>(t - 1, hash, value)?;
 		let caller = account("member", t - 1, SEED);
 		// Whitelist caller account from further DB operations.
 		let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
@@ -142,12 +146,12 @@ benchmarks! {
 		let t in 1 .. T::Tippers::max_len() as u32;

 		// Make sure pot is funded
-		setup_pot_account::<T>();
+		setup_pot_account::<T, I>();

 		// Set up a new tip proposal
-		let (member, reason, beneficiary, value) = setup_tip::<T>(0, t)?;
+		let (member, reason, beneficiary, value) = setup_tip::<T, I>(0, t)?;
 		let value = T::Currency::minimum_balance().saturating_mul(100u32.into());
-		TipsMod::<T>::tip_new(
+		TipsMod::<T, I>::tip_new(
 			RawOrigin::Signed(member).into(),
 			reason.clone(),
 			beneficiary.clone(),
@@ -157,9 +161,9 @@ benchmarks! {
 		// Create a bunch of tips
 		let reason_hash = T::Hashing::hash(&reason[..]);
 		let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary));
-		ensure!(Tips::<T>::contains_key(hash), "tip does not exist");
+		ensure!(Tips::<T, I>::contains_key(hash), "tip does not exist");

-		create_tips::<T>(t, hash, value)?;
+		create_tips::<T, I>(t, hash, value)?;

 		let caller = account("caller", t, SEED);
 		// Whitelist caller account from further DB operations.
@@ -171,12 +175,12 @@ benchmarks! {
 		let t in 1 .. T::Tippers::max_len() as u32;

 		// Make sure pot is funded
-		setup_pot_account::<T>();
+		setup_pot_account::<T, I>();

 		// Set up a new tip proposal
-		let (member, reason, beneficiary, value) = setup_tip::<T>(0, t)?;
+		let (member, reason, beneficiary, value) = setup_tip::<T, I>(0, t)?;
 		let value = T::Currency::minimum_balance().saturating_mul(100u32.into());
-		TipsMod::<T>::tip_new(
+		TipsMod::<T, I>::tip_new(
 			RawOrigin::Signed(member).into(),
 			reason.clone(),
 			beneficiary.clone(),
@@ -185,7 +189,7 @@ benchmarks! {
 		let reason_hash = T::Hashing::hash(&reason[..]);
 		let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary));
-		ensure!(Tips::<T>::contains_key(hash), "tip does not exist");
+		ensure!(Tips::<T, I>::contains_key(hash), "tip does not exist");
	}: _(RawOrigin::Root, hash)

	impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test);
diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs
index e1c7b5e77c062..71af87b42b55b 100644
--- a/frame/tips/src/lib.rs
+++ b/frame/tips/src/lib.rs
@@ -78,8 +78,8 @@ use frame_support::{
 pub use pallet::*;
 pub use weights::WeightInfo;

-pub type BalanceOf<T> = pallet_treasury::BalanceOf<T>;
-pub type NegativeImbalanceOf<T> = pallet_treasury::NegativeImbalanceOf<T>;
+pub type BalanceOf<T, I = ()> = pallet_treasury::BalanceOf<T, I>;
+pub type NegativeImbalanceOf<T, I = ()> = pallet_treasury::NegativeImbalanceOf<T, I>;

 /// An open tipping "motion". Retains all details of a tip including information on the finder
 /// and the members who have voted.
@@ -121,12 +121,12 @@ pub mod pallet {
 	#[pallet::generate_store(pub(super) trait Store)]
 	#[pallet::storage_version(STORAGE_VERSION)]
 	#[pallet::without_storage_info]
-	pub struct Pallet<T>(_);
+	pub struct Pallet<T, I = ()>(_);

 	#[pallet::config]
-	pub trait Config: frame_system::Config + pallet_treasury::Config {
+	pub trait Config<I: 'static = ()>: frame_system::Config + pallet_treasury::Config<I> {
 		/// The overarching event type.
-		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;

 		/// Maximum acceptable reason length.
 		///
@@ -136,7 +136,7 @@ pub mod pallet {
 		/// The amount held on deposit per byte within the tip report reason or bounty description.
 		#[pallet::constant]
-		type DataDepositPerByte: Get<BalanceOf<Self>>;
+		type DataDepositPerByte: Get<BalanceOf<Self, I>>;

 		/// The period for which a tip remains open after is has achieved threshold tippers.
 		#[pallet::constant]
@@ -148,7 +148,7 @@ pub mod pallet {
 		/// The amount held on deposit for placing a tip report.
 		#[pallet::constant]
-		type TipReportDepositBase: Get<BalanceOf<Self>>;
+		type TipReportDepositBase: Get<BalanceOf<Self, I>>;

 		/// Origin from which tippers must come.
 		///
@@ -166,11 +166,11 @@ pub mod pallet {
 	/// guaranteed to be a secure hash.
 	#[pallet::storage]
 	#[pallet::getter(fn tips)]
-	pub type Tips<T: Config> = StorageMap<
+	pub type Tips<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Twox64Concat,
 		T::Hash,
-		OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>,
+		OpenTip<T::AccountId, BalanceOf<T, I>, T::BlockNumber, T::Hash>,
 		OptionQuery,
 	>;

@@ -178,25 +178,26 @@ pub mod pallet {
 	/// insecure enumerable hash since the key is guaranteed to be the result of a secure hash.
 	#[pallet::storage]
 	#[pallet::getter(fn reasons)]
-	pub type Reasons<T: Config> = StorageMap<_, Identity, T::Hash, Vec<u8>, OptionQuery>;
+	pub type Reasons<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, T::Hash, Vec<u8>, OptionQuery>;

 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
-	pub enum Event<T: Config> {
+	pub enum Event<T: Config<I>, I: 'static = ()> {
 		/// A new tip suggestion has been opened.
 		NewTip { tip_hash: T::Hash },
 		/// A tip suggestion has reached threshold and is closing.
 		TipClosing { tip_hash: T::Hash },
 		/// A tip suggestion has been closed.
-		TipClosed { tip_hash: T::Hash, who: T::AccountId, payout: BalanceOf<T> },
+		TipClosed { tip_hash: T::Hash, who: T::AccountId, payout: BalanceOf<T, I> },
 		/// A tip suggestion has been retracted.
 		TipRetracted { tip_hash: T::Hash },
 		/// A tip suggestion has been slashed.
-		TipSlashed { tip_hash: T::Hash, finder: T::AccountId, deposit: BalanceOf<T> },
+		TipSlashed { tip_hash: T::Hash, finder: T::AccountId, deposit: BalanceOf<T, I> },
	}

 	#[pallet::error]
-	pub enum Error<T> {
+	pub enum Error<T, I = ()> {
 		/// The reason given is just too big.
 		ReasonTooBig,
 		/// The tip was already found/started.
@@ -212,7 +213,7 @@ pub mod pallet {
	}

 	#[pallet::call]
-	impl<T: Config> Pallet<T> {
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Report something `reason` that deserves a tip and claim any eventual the finder's fee.
 		///
 		/// The dispatch origin for this call must be _Signed_.
@@ -232,7 +233,7 @@ pub mod pallet {
 		/// - DbReads: `Reasons`, `Tips`
 		/// - DbWrites: `Reasons`, `Tips`
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::report_awesome(reason.len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::report_awesome(reason.len() as u32))]
 		pub fn report_awesome(
 			origin: OriginFor<T>,
 			reason: Vec<u8>,
 			who: T::AccountId,
 		) -> DispatchResult {
 			let finder = ensure_signed(origin)?;

 			ensure!(
 				reason.len() <= T::MaximumReasonLength::get() as usize,
-				Error::<T>::ReasonTooBig
+				Error::<T, I>::ReasonTooBig
 			);

 			let reason_hash = T::Hashing::hash(&reason[..]);
-			ensure!(!Reasons::<T>::contains_key(&reason_hash), Error::<T>::AlreadyKnown);
+			ensure!(!Reasons::<T, I>::contains_key(&reason_hash), Error::<T, I>::AlreadyKnown);
 			let hash = T::Hashing::hash_of(&(&reason_hash, &who));
-			ensure!(!Tips::<T>::contains_key(&hash), Error::<T>::AlreadyKnown);
+			ensure!(!Tips::<T, I>::contains_key(&hash), Error::<T, I>::AlreadyKnown);

 			let deposit = T::TipReportDepositBase::get() +
 				T::DataDepositPerByte::get() * (reason.len() as u32).into();
 			T::Currency::reserve(&finder, deposit)?;

-			Reasons::<T>::insert(&reason_hash, &reason);
+			Reasons::<T, I>::insert(&reason_hash, &reason);
 			let tip = OpenTip {
 				reason: reason_hash,
 				who,
@@ -264,7 +265,7 @@ pub mod pallet {
 				tips: vec![],
 				finders_fee: true,
 			};
-			Tips::<T>::insert(&hash, tip);
+			Tips::<T, I>::insert(&hash, tip);
 			Self::deposit_event(Event::NewTip { tip_hash: hash });
 			Ok(())
 		}
@@ -288,14 +289,14 @@ pub mod pallet {
 		/// - DbReads: `Tips`, `origin account`
 		/// - DbWrites: `Reasons`, `Tips`, `origin account`
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::retract_tip())]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::retract_tip())]
 		pub fn retract_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult {
 			let who = ensure_signed(origin)?;
-			let tip = Tips::<T>::get(&hash).ok_or(Error::<T>::UnknownTip)?;
-			ensure!(tip.finder == who, Error::<T>::NotFinder);
+			let tip = Tips::<T, I>::get(&hash).ok_or(Error::<T, I>::UnknownTip)?;
+			ensure!(tip.finder == who, Error::<T, I>::NotFinder);

-			Reasons::<T>::remove(&tip.reason);
-			Tips::<T>::remove(&hash);
+			Reasons::<T, I>::remove(&tip.reason);
+			Tips::<T, I>::remove(&hash);
 			if !tip.deposit.is_zero() {
 				let err_amount = T::Currency::unreserve(&who, tip.deposit);
 				debug_assert!(err_amount.is_zero());
@@ -326,20 +327,20 @@ pub mod pallet {
 		/// - DbReads: `Tippers`, `Reasons`
 		/// - DbWrites: `Reasons`, `Tips`
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32))]
 		pub fn tip_new(
 			origin: OriginFor<T>,
 			reason: Vec<u8>,
 			who: T::AccountId,
-			#[pallet::compact] tip_value: BalanceOf<T>,
+			#[pallet::compact] tip_value: BalanceOf<T, I>,
 		) -> DispatchResult {
 			let tipper = ensure_signed(origin)?;
 			ensure!(T::Tippers::contains(&tipper), BadOrigin);
 			let reason_hash = T::Hashing::hash(&reason[..]);
-			ensure!(!Reasons::<T>::contains_key(&reason_hash), Error::<T>::AlreadyKnown);
+			ensure!(!Reasons::<T, I>::contains_key(&reason_hash), Error::<T, I>::AlreadyKnown);
 			let hash = T::Hashing::hash_of(&(&reason_hash, &who));

-			Reasons::<T>::insert(&reason_hash, &reason);
+			Reasons::<T, I>::insert(&reason_hash, &reason);
 			Self::deposit_event(Event::NewTip { tip_hash: hash });
 			let tips = vec![(tipper.clone(), tip_value)];
 			let tip = OpenTip {
@@ -351,7 +352,7 @@ pub mod pallet {
 				tips,
 				finders_fee: false,
 			};
-			Tips::<T>::insert(&hash, tip);
+			Tips::<T, I>::insert(&hash, tip);
 			Ok(())
 		}

@@ -379,20 +380,20 @@ pub mod pallet {
 		/// - DbReads: `Tippers`, `Tips`
 		/// - DbWrites: `Tips`
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::tip(T::Tippers::max_len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::tip(T::Tippers::max_len() as u32))]
 		pub fn tip(
 			origin: OriginFor<T>,
 			hash: T::Hash,
-			#[pallet::compact] tip_value: BalanceOf<T>,
+			#[pallet::compact] tip_value: BalanceOf<T, I>,
 		) -> DispatchResult {
 			let tipper = ensure_signed(origin)?;
 			ensure!(T::Tippers::contains(&tipper), BadOrigin);

-			let mut tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?;
+			let mut tip = Tips::<T, I>::get(hash).ok_or(Error::<T, I>::UnknownTip)?;
 			if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) {
 				Self::deposit_event(Event::TipClosing { tip_hash: hash });
 			}
-			Tips::<T>::insert(&hash, tip);
+			Tips::<T, I>::insert(&hash, tip);
 			Ok(())
 		}

@@ -412,16 +413,16 @@ pub mod pallet {
 		/// - DbReads: `Tips`, `Tippers`, `tip finder`
 		/// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder`
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::close_tip(T::Tippers::max_len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::close_tip(T::Tippers::max_len() as u32))]
 		pub fn close_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult {
 			ensure_signed(origin)?;

-			let tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?;
-			let n = tip.closes.as_ref().ok_or(Error::<T>::StillOpen)?;
-			ensure!(frame_system::Pallet::<T>::block_number() >= *n, Error::<T>::Premature);
+			let tip = Tips::<T, I>::get(hash).ok_or(Error::<T, I>::UnknownTip)?;
+			let n = tip.closes.as_ref().ok_or(Error::<T, I>::StillOpen)?;
+			ensure!(frame_system::Pallet::<T>::block_number() >= *n, Error::<T, I>::Premature);
 			// closed.
-			Reasons::<T>::remove(&tip.reason);
-			Tips::<T>::remove(hash);
+			Reasons::<T, I>::remove(&tip.reason);
+			Tips::<T, I>::remove(hash);
 			Self::payout_tip(hash, tip);
 			Ok(())
 		}
@@ -438,17 +439,17 @@ pub mod pallet {
 		/// `T` is charged as upper bound given by `ContainsLengthBound`.
 		/// The actual cost depends on the implementation of `T::Tippers`.
 		/// # </weight>
-		#[pallet::weight(<T as Config>::WeightInfo::slash_tip(T::Tippers::max_len() as u32))]
+		#[pallet::weight(<T as Config<I>>::WeightInfo::slash_tip(T::Tippers::max_len() as u32))]
 		pub fn slash_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult {
 			T::RejectOrigin::ensure_origin(origin)?;

-			let tip = Tips::<T>::take(hash).ok_or(Error::<T>::UnknownTip)?;
+			let tip = Tips::<T, I>::take(hash).ok_or(Error::<T, I>::UnknownTip)?;

 			if !tip.deposit.is_zero() {
 				let imbalance = T::Currency::slash_reserved(&tip.finder, tip.deposit).0;
 				T::OnSlash::on_unbalanced(imbalance);
 			}
-			Reasons::<T>::remove(&tip.reason);
+			Reasons::<T, I>::remove(&tip.reason);
 			Self::deposit_event(Event::TipSlashed {
 				tip_hash: hash,
 				finder: tip.finder,
@@ -459,7 +460,7 @@ pub mod pallet {
	}
}

-impl<T: Config> Pallet<T> {
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
	// Add public immutables and private mutables.

	/// The account ID of the treasury pot.
@@ -475,9 +476,9 @@ impl<T: Config> Pallet<T> {
	///
	/// `O(T)` and one storage access.
	fn insert_tip_and_check_closing(
-		tip: &mut OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>,
+		tip: &mut OpenTip<T::AccountId, BalanceOf<T, I>, T::BlockNumber, T::Hash>,
 		tipper: T::AccountId,
-		tip_value: BalanceOf<T>,
+		tip_value: BalanceOf<T, I>,
	) -> bool {
 		match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) {
 			Ok(pos) => tip.tips[pos] = (tipper, tip_value),
@@ -494,7 +495,7 @@ impl<T: Config> Pallet<T> {
	}

	/// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`.
-	fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf<T>)>) {
+	fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf<T, I>)>) {
 		let members = T::Tippers::sorted_members();
 		let mut members_iter = members.iter();
 		let mut member = members_iter.next();
@@ -520,14 +521,14 @@ impl<T: Config> Pallet<T> {
	/// Plus `O(T)` (`T` is Tippers length).
	fn payout_tip(
 		hash: T::Hash,
-		tip: OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>,
+		tip: OpenTip<T::AccountId, BalanceOf<T, I>, T::BlockNumber, T::Hash>,
	) {
 		let mut tips = tip.tips;
 		Self::retain_active_tips(&mut tips);
 		tips.sort_by_key(|i| i.1);

 		let treasury = Self::account_id();
-		let max_payout = pallet_treasury::Pallet::<T>::pot();
+		let max_payout = pallet_treasury::Pallet::<T, I>::pot();

 		let mut payout = tips[tips.len() / 2].1.min(max_payout);
 		if !tip.deposit.is_zero() {
@@ -582,7 +583,7 @@ impl<T: Config> Pallet<T> {
 			for (hash, old_tip) in storage_key_iter::<
 				T::Hash,
-				OldOpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>,
+				OldOpenTip<T::AccountId, BalanceOf<T, I>, T::BlockNumber, T::Hash>,
 				Twox64Concat,
 			>(module, item)
 			.drain()
@@ -600,7 +601,7 @@ impl<T: Config> Pallet<T> {
 				tips: old_tip.tips,
 				finders_fee,
 			};
-			Tips::<T>::insert(hash, new_tip)
+			Tips::<T, I>::insert(hash, new_tip)
 		}
	}
}
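The `payout_tip` change above only re-parameterizes the types; the payout rule itself is
unchanged: after sorting, `tips[tips.len() / 2]` picks the median tip, capped by the
instance's pot. A worked example of that selection in plain Rust, detached from the pallet
types (the function name `median_payout` is illustrative):

    // Median-of-tips selection as in `payout_tip`, with plain u64 balances.
    // With an even number of tips this picks the upper-middle element.
    fn median_payout(mut tips: Vec<u64>, max_payout: u64) -> u64 {
        tips.sort();
        tips[tips.len() / 2].min(max_payout)
    }

    fn main() {
        assert_eq!(median_payout(vec![100, 5, 10], 50), 10); // median is 10
        assert_eq!(median_payout(vec![40, 60, 80], 50), 50); // capped by the pot
    }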
diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs
index 235952fd1092c..194ecc60d9890 100644
--- a/frame/tips/src/tests.rs
+++ b/frame/tips/src/tests.rs
@@ -25,7 +25,7 @@ use sp_core::H256;
 use sp_runtime::{
 	testing::Header,
 	traits::{BadOrigin, BlakeTwo256, IdentityLookup},
-	Perbill, Permill,
+	BuildStorage, Perbill, Permill,
 };
 use sp_storage::Storage;
@@ -53,7 +53,9 @@ frame_support::construct_runtime!(
 		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
 		Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event<T>},
+		Treasury1: pallet_treasury::<Instance1>::{Pallet, Call, Storage, Config, Event<T>},
 		Tips: pallet_tips::{Pallet, Call, Storage, Event<T>},
+		Tips1: pallet_tips::<Instance1>::{Pallet, Call, Storage, Event<T>},
	}
);
@@ -127,6 +129,7 @@ parameter_types! {
	pub const ProposalBond: Permill = Permill::from_percent(5);
	pub const Burn: Permill = Permill::from_percent(50);
	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2");
}
impl pallet_treasury::Config for Test {
	type PalletId = TreasuryPalletId;
@@ -146,6 +149,26 @@ impl pallet_treasury::Config for Test {
	type MaxApprovals = ConstU32<100>;
	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
}
+
+impl pallet_treasury::Config<Instance1> for Test {
+	type PalletId = TreasuryPalletId2;
+	type Currency = pallet_balances::Pallet<Test>;
+	type ApproveOrigin = frame_system::EnsureRoot<u128>;
+	type RejectOrigin = frame_system::EnsureRoot<u128>;
+	type Event = Event;
+	type OnSlash = ();
+	type ProposalBond = ProposalBond;
+	type ProposalBondMinimum = ConstU64<1>;
+	type ProposalBondMaximum = ();
+	type SpendPeriod = ConstU64<2>;
+	type Burn = Burn;
+	type BurnDestination = (); // Just gets burned.
+	type WeightInfo = ();
+	type SpendFunds = ();
+	type MaxApprovals = ConstU32<100>;
+	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
+}
+
parameter_types! {
	pub const TipFindersFee: Percent = Percent::from_percent(20);
}
@@ -160,16 +183,29 @@ impl Config for Test {
	type WeightInfo = ();
}

+impl Config<Instance1> for Test {
+	type MaximumReasonLength = ConstU32<16384>;
+	type Tippers = TenToFourteen;
+	type TipCountdown = ConstU64<1>;
+	type TipFindersFee = TipFindersFee;
+	type TipReportDepositBase = ConstU64<1>;
+	type DataDepositPerByte = ConstU64<1>;
+	type Event = Event;
+	type WeightInfo = ();
+}
+
pub fn new_test_ext() -> sp_io::TestExternalities {
-	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	pallet_balances::GenesisConfig::<Test> {
-		// Total issuance will be 200 with treasury account initialized at ED.
-		balances: vec![(0, 100), (1, 98), (2, 1)],
+	let mut ext: sp_io::TestExternalities = GenesisConfig {
+		system: frame_system::GenesisConfig::default(),
+		balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] },
+		treasury: Default::default(),
+		treasury_1: Default::default(),
	}
-	.assimilate_storage(&mut t)
-	.unwrap();
-	GenesisBuild::<Test>::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap();
-	t.into()
+	.build_storage()
+	.unwrap()
+	.into();
+	ext.execute_with(|| System::set_block_number(1));
+	ext
}

fn last_event() -> TipEvent<Test> {
@@ -540,3 +576,36 @@ fn genesis_funding_works() {
		assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance());
	});
}
+
+#[test]
+fn report_awesome_and_tip_works_second_instance() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		Balances::make_free_balance_be(&Treasury1::account_id(), 201);
+		assert_eq!(Balances::free_balance(&Treasury::account_id()), 101);
+		assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201);
+
+		assert_ok!(Tips1::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3));
+		// duplicate report in tips1 reports don't count.
+		assert_noop!(
+			Tips1::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3),
+			Error::<Test, Instance1>::AlreadyKnown
+		);
+		// but tips is separate
+		assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3));
+
+		let h = tip_hash();
+		assert_ok!(Tips1::tip(Origin::signed(10), h.clone(), 10));
+		assert_ok!(Tips1::tip(Origin::signed(11), h.clone(), 10));
+		assert_ok!(Tips1::tip(Origin::signed(12), h.clone(), 10));
+		assert_noop!(Tips1::tip(Origin::signed(9), h.clone(), 10), BadOrigin);
+
+		System::set_block_number(2);
+
+		assert_ok!(Tips1::close_tip(Origin::signed(100), h.into()));
+		// Treasury 1 unchanged
+		assert_eq!(Balances::free_balance(&Treasury::account_id()), 101);
+		// Treasury 2 gave the funds
+		assert_eq!(Balances::free_balance(&Treasury1::account_id()), 191);
+	});
+}

From 622f532e605270c9d4f932aea666073e409e02f9 Mon Sep 17 00:00:00 2001
From: Alexander Gryaznov
Date: Thu, 23 Jun 2022 15:10:35 +0300
Subject: [PATCH 013/135] [contracts] Implement transparent hashing for
 contract storage (#11501)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* save

* builds and old tests pass
  save: temporary value dropped while borrowed
  save: finally builds
  test updated but still fails

* type names enhanced

* VarSizedKey bounded to new Config param

* improved wasm runtime updated funcs

* unstable-interface tests fixed

* benchmarks fixed

* Apply suggestions from code review

Co-authored-by: Alexander Theißen

* fixes on feedback

* fixes on feedback applied + make it build

* benchmarks build but fail (old)

* "Original code too large"

* seal_clear_storage bench fixed (code size workaround hack removal tbd)

* bench_seal_clear_storage pass

* bench_seal_take_storage ... ok

* added new seal_set_storage + updated benchmarks

* added new seal_get_storage + updated benchmarks

* added new seal_contains_storage + updated benchmarks

* added tests for _transparent exec functions

* wasm test for clear_storage

* wasm test for take_storage

* wasm test for new set_storage

* wasm test for new get_storage

* wasm test for new contains_storage

* CI fix

* ci fix

* ci fix

* ci fix

* cargo run --quiet --profile=production  --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fixes according to the review feedback

* tests & benchmarks fixed

* cargo run --quiet --profile=production  --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* refactoring

* fix to runtime api

* ci fix

* ctx.get_storage() factored out

* ctx.contains_storage() factored out

* number of batches reduced for transparent hashing storage benchmarks

* contracts RPC & pallet::get_storage to use transparent hashing

* node and rpc updated to use get_storage with VarSizedKey

* refactored (more concize)

* refactored contains_storage (DRYed)

* refactored contains_storage (DRYed)

* fix rpc

* fmt fix

* more fixes in rpc

* rollback `Pallet:get_storage` to Vec<u8> and rpc and node parts related to it

* added `KeyDecodingFailed` error

* Revert weird "fmt fix"
  This reverts commit c582cfff4b5cb2c9929fd5e3b45519bb24aeb657.

* node-executor basic test update

* fix node-executor basic test

* benchmarks fix

* more benchmarks fix

* FixedSizedKey is hidden from pub, VarSizedKey is exported as StorageKey

* ci fix

* set_storage benchmark fix

* ci fix

* ci fix

* comments improved

* new error code to rpc: KEY_DECODING_FAILED

* Put `rusty-cachier` before PR merge into `master` for `cargo-check-benches` job

* cargo run --quiet --profile=production  --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* minor optimization

Co-authored-by: Alexander Theißen
Co-authored-by: Parity Bot
Co-authored-by: Vladimir Istyufeev
Co-authored-by: command-bot <>
---
 bin/node/executor/tests/basic.rs           |    6 +-
 bin/node/runtime/src/lib.rs                |    3 +-
 frame/contracts/common/src/lib.rs          |    2 +
 frame/contracts/rpc/runtime-api/src/lib.rs |    2 +-
 frame/contracts/rpc/src/lib.rs             |   15 +-
 frame/contracts/src/benchmarking/mod.rs    |  245 +--
 frame/contracts/src/exec.rs                |  408 ++++-
 frame/contracts/src/lib.rs                 |   21 +-
 frame/contracts/src/storage.rs             |   16 +-
 frame/contracts/src/tests.rs               |   23 +-
 frame/contracts/src/wasm/mod.rs            |  433 +++++-
 frame/contracts/src/wasm/runtime.rs        |  220 ++-
 frame/contracts/src/weights.rs             | 1644 +++++++++++---------
 13 files changed, 2040 insertions(+), 998 deletions(-)
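In short, "transparent hashing" means the contract passes its storage key as plain bytes (up
to `MaxStorageKeyLen`) and the pallet hashes it on the way into the child trie, instead of
requiring a pre-hashed, fixed 32-byte key. A hedged sketch of the idea, assuming FRAME's
`Blake2_128Concat` transparent hasher (an assumption for illustration; the helper name is
invented, and the pallet's real entry points are `Storage::<T>::write` and friends shown in
the diffs below):

    use frame_support::{Blake2_128Concat, StorageHasher};

    // A "transparent" hasher emits blake2_128(key) ++ key, so the original key
    // stays recoverable from the storage path while lookups remain
    // collision-resistant.
    fn child_trie_key(user_key: &[u8]) -> Vec<u8> {
        Blake2_128Concat::hash(user_key)
    }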
diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs
index de2aa6c18369d..31a9bd0a90496 100644
--- a/bin/node/executor/tests/basic.rs
+++ b/bin/node/executor/tests/basic.rs
@@ -764,7 +764,11 @@ fn deploying_wasm_contract_should_work() {
	t.execute_with(|| {
		// Verify that the contract does exist by querying some of its storage items
		// It does not matter that the storage item itself does not exist.
-		assert!(&pallet_contracts::Pallet::<Runtime>::get_storage(addr, Default::default()).is_ok());
+		assert!(&pallet_contracts::Pallet::<Runtime>::get_storage(
+			addr,
+			pallet_contracts::StorageKey::<Runtime>::default().to_vec()
+		)
+		.is_ok());
	});
}

diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 54742cd91c430..2d5981339fab3 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -1151,6 +1151,7 @@ impl pallet_contracts::Config for Runtime {
	type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight<RuntimeBlockWeights>;
	type MaxCodeLen = ConstU32<{ 128 * 1024 }>;
	type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>;
+	type MaxStorageKeyLen = ConstU32<128>;
}

impl pallet_sudo::Config for Runtime {
@@ -1936,7 +1937,7 @@ impl_runtime_apis! {

		fn get_storage(
			address: AccountId,
-			key: [u8; 32],
+			key: Vec<u8>,
		) -> pallet_contracts_primitives::GetStorageResult {
			Contracts::get_storage(address, key)
		}

diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/common/src/lib.rs
index 49f91f6769290..f810725afcd36 100644
--- a/frame/contracts/common/src/lib.rs
+++ b/frame/contracts/common/src/lib.rs
@@ -106,6 +106,8 @@ pub type GetStorageResult = Result<Option<Vec<u8>>, ContractAccessError>;
 pub enum ContractAccessError {
	/// The given address doesn't point to a contract.
	DoesntExist,
+	/// Storage key cannot be decoded from the provided input data.
+	KeyDecodingFailed,
}

bitflags! {
diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs
index 59622a21a6593..9765b37057c7b 100644
--- a/frame/contracts/rpc/runtime-api/src/lib.rs
+++ b/frame/contracts/rpc/runtime-api/src/lib.rs
@@ -79,7 +79,7 @@ sp_api::decl_runtime_apis! {
		/// doesn't exist, or doesn't have a contract then `Err` is returned.
		fn get_storage(
			address: AccountId,
-			key: [u8; 32],
+			key: Vec<u8>,
		) -> GetStorageResult;
	}
}
diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs
index 77ae3f3ed35e3..0df8f90237ed3 100644
--- a/frame/contracts/rpc/src/lib.rs
+++ b/frame/contracts/rpc/src/lib.rs
@@ -33,7 +33,7 @@ use pallet_contracts_primitives::{
 use serde::{Deserialize, Serialize};
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
-use sp_core::{Bytes, H256};
+use sp_core::Bytes;
 use sp_rpc::number::NumberOrHex;
 use sp_runtime::{
 	generic::BlockId,
@@ -44,6 +44,7 @@ pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi;

 const RUNTIME_ERROR: i32 = 1;
 const CONTRACT_DOESNT_EXIST: i32 = 2;
+const KEY_DECODING_FAILED: i32 = 3;

 pub type Weight = u64;

@@ -74,6 +75,12 @@ impl From<ContractAccessError> for JsonRpseeError {
 				None::<()>,
 			))
 			.into(),
+			KeyDecodingFailed => CallError::Custom(ErrorObject::owned(
+				KEY_DECODING_FAILED,
+				"Failed to decode the specified storage key.",
+				None::<()>,
+			))
+			.into(),
 		}
	}
}
@@ -167,7 +174,7 @@ where
	fn get_storage(
		&self,
		address: AccountId,
-		key: H256,
+		key: Bytes,
		at: Option<BlockHash>,
	) -> RpcResult<Option<Bytes>>;
}
@@ -292,13 +299,13 @@ where
	fn get_storage(
		&self,
		address: AccountId,
-		key: H256,
+		key: Bytes,
		at: Option<<Block as BlockT>::Hash>,
	) -> RpcResult<Option<Bytes>> {
		let api = self.client.runtime_api();
		let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash));
		let result = api
-			.get_storage(&at, address, key.into())
+			.get_storage(&at, address, key.to_vec())
			.map_err(runtime_error_into_rpc_err)?
			.map_err(ContractAccessError)?
			.map(Bytes);
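With the RPC and runtime API now taking raw key bytes, a caller-side use of `get_storage`
might look like the following sketch. The `Runtime` type and account value are placeholders,
and the error-handling detail is an assumption based on the new `KeyDecodingFailed` variant
above (surfaced over RPC as `KEY_DECODING_FAILED`):

    // Sketch: querying a contract's storage with an arbitrary-length key.
    fn read_contract_storage(addr: AccountId, key: Vec<u8>) {
        // Returns Result<Option<Vec<u8>>, ContractAccessError>.
        match pallet_contracts::Pallet::<Runtime>::get_storage(addr, key) {
            Ok(Some(value)) => println!("stored value: {:?}", value),
            Ok(None) => println!("nothing stored under this key"),
            Err(e) => println!("access error: {:?}", e),
        }
    }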
+ // accesses. We store all the keys that we are about to write at beforehand + // because re-writing at an existing key is always more expensive than writing + // it at a virgin key. #[skip_meta] seal_set_storage { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_BATCHES/2; + let max_key_len = T::MaxStorageKeyLen::get(); let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); - let key_bytes = keys.iter().flatten().cloned().collect::>(); + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); + let keys_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", + module: "__unstable__", name: "seal_set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ DataSegment { offset: 0, - value: key_bytes, + value: keys_bytes, }, ], call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const(0)), // value_ptr Regular(Instruction::I32Const(0)), // value_len Regular(Instruction::Call(0)), @@ -935,7 +945,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -947,18 +957,19 @@ benchmarks! { #[skip_meta] seal_set_storage_per_new_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", + module: "__unstable__", name: "seal_set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -968,9 +979,10 @@ benchmarks! 
{ }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const((n * 1024) as i32)), // value_len + Regular(Instruction::I32Const((n * 2048) as i32)), // value_len increments by 2kb up to max payload_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -981,7 +993,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -993,18 +1005,19 @@ benchmarks! { #[skip_meta] seal_set_storage_per_old_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", + module: "__unstable__", name: "seal_set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1014,9 +1027,10 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // value_len + Regular(Instruction::I32Const(0)), // value_len is 0 as testing vs pre-existing value len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1027,8 +1041,8 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 1024) as usize]), + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len None, false, ) @@ -1037,23 +1051,25 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - // Similar to seal_set_storage. However, we store all the keys that we are about to + // Similar to seal_set_storage. We store all the keys that we are about to // delete beforehand in order to prevent any optimizations that could occur when - // deleting a non existing key. + // deleting a non existing key. We generate keys of a maximum length, and have to + // reduce batch size in order to make resulting contract code size less than MaxCodeLen. #[skip_meta] seal_clear_storage { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 
API_BENCHMARK_BATCHES/2; + let max_key_len = T::MaxStorageKeyLen::get(); let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", name: "seal_clear_storage", - params: vec![ValueType::I32], + params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1063,7 +1079,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1074,7 +1091,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1087,18 +1104,19 @@ benchmarks! { #[skip_meta] seal_clear_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", name: "seal_clear_storage", - params: vec![ValueType::I32], + params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1108,7 +1126,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1119,8 +1138,8 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 1024) as usize]), + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len None, false, ) @@ -1132,19 +1151,20 @@ benchmarks! { // We make sure that all storage accesses are to unique keys. #[skip_meta] seal_get_storage { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_BATCHES/2; + let max_key_len = T::MaxStorageKeyLen::get(); let keys = (0 .. 
r * API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", + module: "__unstable__", name: "seal_get_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1158,7 +1178,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr Regular(Instruction::Call(0)), @@ -1171,7 +1192,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1184,19 +1205,20 @@ benchmarks! { #[skip_meta] seal_get_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", + module: "__unstable__", name: "seal_get_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1210,7 +1232,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr Regular(Instruction::Call(0)), @@ -1223,8 +1246,8 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 1024) as usize]), + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len None, false, ) @@ -1237,19 +1260,20 @@ benchmarks! 
{ // We make sure that all storage accesses are to unique keys. #[skip_meta] seal_contains_storage { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. API_BENCHMARK_BATCHES/2; + let max_key_len = T::MaxStorageKeyLen::get(); let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", + module: "__unstable__", name: "seal_contains_storage", - params: vec![ValueType::I32], + params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1259,7 +1283,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1270,7 +1295,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1283,18 +1308,19 @@ benchmarks! { #[skip_meta] seal_contains_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal0", + module: "__unstable__", name: "seal_contains_storage", - params: vec![ValueType::I32], + params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1304,7 +1330,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1315,8 +1342,8 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 1024) as usize]), + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len None, false, ) @@ -1328,11 +1355,12 @@ benchmarks! { #[skip_meta] seal_take_storage { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 
API_BENCHMARK_BATCHES/2; + let max_key_len = T::MaxStorageKeyLen::get(); let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1340,7 +1368,7 @@ benchmarks! { imported_functions: vec![ImportedFunction { module: "__unstable__", name: "seal_take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1354,7 +1382,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr Regular(Instruction::Call(0)), @@ -1367,7 +1396,7 @@ benchmarks! { for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, Some(vec![]), None, false, @@ -1380,11 +1409,12 @@ benchmarks! { #[skip_meta] seal_take_storage_per_kb { - let n in 0 .. T::Schedule::get().limits.payload_len / 1024; - let keys = (0 .. API_BENCHMARK_BATCH_SIZE) - .map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) - .collect::>(); - let key_len = keys.get(0).map(|i| i.len() as u32).unwrap_or(0); + let n in 0 .. T::Schedule::get().limits.payload_len / 2048; // half of the max payload_len in kb + let max_key_len = T::MaxStorageKeyLen::get(); + let keys = (0 .. n * API_BENCHMARK_BATCH_SIZE) + .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1392,7 +1422,7 @@ benchmarks! { imported_functions: vec![ImportedFunction { module: "__unstable__", name: "seal_take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], data_segments: vec![ @@ -1406,7 +1436,8 @@ benchmarks! { }, ], call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ - Counter(0, key_len as u32), // key_ptr + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr Regular(Instruction::Call(0)), @@ -1419,8 +1450,8 @@ benchmarks! 
{ for key in keys { Storage::::write( &info.trie_id, - key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42u8; (n * 1024) as usize]), + &VarSizedKey::::try_from(key).map_err(|e| "Key has wrong length")?, + Some(vec![42u8; (n * 2048) as usize]), // value_len increments by 2kb up to max payload_len None, false, ) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index e3ff4788791a5..ec7e1e9335182 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -27,12 +27,13 @@ use frame_support::{ storage::{with_transaction, TransactionOutcome}, traits::{Contains, Currency, ExistenceRequirement, OriginTrait, Randomness, Time}, weights::Weight, + Blake2_128Concat, BoundedVec, StorageHasher, }; use frame_system::RawOrigin; use pallet_contracts_primitives::ExecReturnValue; use smallvec::{Array, SmallVec}; use sp_core::{crypto::UncheckedFrom, ecdsa::Public as ECDSAPublic}; -use sp_io::crypto::secp256k1_ecdsa_recover_compressed; +use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; use sp_runtime::traits::Convert; use sp_std::{marker::PhantomData, mem, prelude::*}; @@ -40,12 +41,40 @@ pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; pub type SeedOf = ::Hash; pub type BlockNumberOf = ::BlockNumber; -pub type StorageKey = [u8; 32]; pub type ExecResult = Result; /// A type that represents a topic of an event. At the moment a hash is used. pub type TopicOf = ::Hash; +/// Type for fix sized storage key. +pub type FixSizedKey = [u8; 32]; + +/// Type for variable sized storage key. Used for transparent hashing. +pub type VarSizedKey = BoundedVec::MaxStorageKeyLen>; + +/// Trait for hashing storage keys. +pub trait StorageKey +where + T: Config, +{ + fn hash(&self) -> Vec; +} + +impl StorageKey for FixSizedKey { + fn hash(&self) -> Vec { + blake2_256(self.as_slice()).to_vec() + } +} + +impl StorageKey for VarSizedKey +where + T: Config, +{ + fn hash(&self) -> Vec { + Blake2_128Concat::hash(self.as_slice()) + } +} + /// Origin of the error. /// /// Call or instantiate both called into other contracts and pass through errors happening @@ -140,19 +169,44 @@ pub trait Ext: sealing::Sealed { /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - fn get_storage(&mut self, key: &StorageKey) -> Option>; + fn get_storage(&mut self, key: &FixSizedKey) -> Option>; + + /// This is a variation of `get_storage()` to be used with transparent hashing. + /// These two will be merged into a single function after some refactoring is done. + /// Returns the storage entry of the executing account by the given `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_storage` or + /// was deleted. + fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option>; /// Returns `Some(len)` (in bytes) if a storage item exists at `key`. /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - fn get_storage_size(&mut self, key: &StorageKey) -> Option; + fn get_storage_size(&mut self, key: &FixSizedKey) -> Option; + + /// This is the variation of `get_storage_size()` to be used with transparent hashing. + /// These two will be merged into a single function after some refactoring is done. + /// Returns `Some(len)` (in bytes) if a storage item exists at `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_storage` or + /// was deleted. 
+ fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> Option; /// Sets the storage entry by the given key to the specified value. If `value` is `None` then /// the storage entry is deleted. fn set_storage( &mut self, - key: StorageKey, + key: &FixSizedKey, + value: Option>, + take_old: bool, + ) -> Result; + + /// This is the variation of `set_storage()` to be used with transparent hashing. + /// These two will be merged into a single function after some refactoring is done. + fn set_storage_transparent( + &mut self, + key: &VarSizedKey, value: Option>, take_old: bool, ) -> Result; @@ -1085,24 +1139,48 @@ where Self::transfer(ExistenceRequirement::KeepAlive, &self.top_frame().account_id, to, value) } - fn get_storage(&mut self, key: &StorageKey) -> Option> { + fn get_storage(&mut self, key: &FixSizedKey) -> Option> { Storage::::read(&self.top_frame_mut().contract_info().trie_id, key) } - fn get_storage_size(&mut self, key: &StorageKey) -> Option { + fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option> { + Storage::::read(&self.top_frame_mut().contract_info().trie_id, key) + } + + fn get_storage_size(&mut self, key: &FixSizedKey) -> Option { + Storage::::size(&self.top_frame_mut().contract_info().trie_id, key) + } + + fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> Option { Storage::::size(&self.top_frame_mut().contract_info().trie_id, key) } fn set_storage( &mut self, - key: StorageKey, + key: &FixSizedKey, value: Option>, take_old: bool, ) -> Result { let frame = self.top_frame_mut(); Storage::::write( &frame.contract_info.get(&frame.account_id).trie_id, - &key, + key, + value, + Some(&mut frame.nested_storage), + take_old, + ) + } + + fn set_storage_transparent( + &mut self, + key: &VarSizedKey, + value: Option>, + take_old: bool, + ) -> Result { + let frame = self.top_frame_mut(); + Storage::::write( + &frame.contract_info.get(&frame.account_id).trie_id, + key, value, Some(&mut frame.nested_storage), take_old, @@ -2635,35 +2713,35 @@ mod tests { let code_hash = MockLoader::insert(Call, |ctx, _| { // Write assert_eq!( - ctx.ext.set_storage([1; 32], Some(vec![1, 2, 3]), false), + ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.set_storage([2; 32], Some(vec![4, 5, 6]), true), + ctx.ext.set_storage(&[2; 32], Some(vec![4, 5, 6]), true), Ok(WriteOutcome::New) ); - assert_eq!(ctx.ext.set_storage([3; 32], None, false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage([4; 32], None, true), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage([5; 32], Some(vec![]), false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage([6; 32], Some(vec![]), true), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[3; 32], None, false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[4; 32], None, true), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[5; 32], Some(vec![]), false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[6; 32], Some(vec![]), true), Ok(WriteOutcome::New)); // Overwrite assert_eq!( - ctx.ext.set_storage([1; 32], Some(vec![42]), false), + ctx.ext.set_storage(&[1; 32], Some(vec![42]), false), Ok(WriteOutcome::Overwritten(3)) ); assert_eq!( - ctx.ext.set_storage([2; 32], Some(vec![48]), true), + ctx.ext.set_storage(&[2; 32], Some(vec![48]), true), Ok(WriteOutcome::Taken(vec![4, 5, 6])) ); - assert_eq!(ctx.ext.set_storage([3; 32], None, false), Ok(WriteOutcome::New)); - assert_eq!(ctx.ext.set_storage([4; 32], None, true), 
Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[3; 32], None, false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[4; 32], None, true), Ok(WriteOutcome::New)); assert_eq!( - ctx.ext.set_storage([5; 32], Some(vec![]), false), + ctx.ext.set_storage(&[5; 32], Some(vec![]), false), Ok(WriteOutcome::Overwritten(0)) ); assert_eq!( - ctx.ext.set_storage([6; 32], Some(vec![]), true), + ctx.ext.set_storage(&[6; 32], Some(vec![]), true), Ok(WriteOutcome::Taken(vec![])) ); @@ -2690,14 +2768,175 @@ mod tests { }); } + #[test] + fn set_storage_transparent_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + // Write + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([1; 64].to_vec()).unwrap(), + Some(vec![1, 2, 3]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([2; 19].to_vec()).unwrap(), + Some(vec![4, 5, 6]), + true + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([3; 19].to_vec()).unwrap(), + None, + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([4; 64].to_vec()).unwrap(), + None, + true + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([5; 30].to_vec()).unwrap(), + Some(vec![]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([6; 128].to_vec()).unwrap(), + Some(vec![]), + true + ), + Ok(WriteOutcome::New) + ); + + // Overwrite + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([1; 64].to_vec()).unwrap(), + Some(vec![42, 43, 44]), + false + ), + Ok(WriteOutcome::Overwritten(3)) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([2; 19].to_vec()).unwrap(), + Some(vec![48]), + true + ), + Ok(WriteOutcome::Taken(vec![4, 5, 6])) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([3; 19].to_vec()).unwrap(), + None, + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([4; 64].to_vec()).unwrap(), + None, + true + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([5; 30].to_vec()).unwrap(), + Some(vec![]), + false + ), + Ok(WriteOutcome::Overwritten(0)) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([6; 128].to_vec()).unwrap(), + Some(vec![]), + true + ), + Ok(WriteOutcome::Taken(vec![])) + ); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 1000); + place_contract(&BOB, code_hash); + let mut storage_meter = storage::meter::Meter::new(&ALICE, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + None, + )); + }); + } + + #[test] + fn get_storage_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + assert_eq!( + ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!(ctx.ext.set_storage(&[2; 32], Some(vec![]), false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.get_storage(&[1; 32]), Some(vec![1, 2, 3])); + assert_eq!(ctx.ext.get_storage(&[2; 
32]), Some(vec![])); + assert_eq!(ctx.ext.get_storage(&[3; 32]), None); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 1000); + place_contract(&BOB, code_hash); + let mut storage_meter = storage::meter::Meter::new(&ALICE, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + None, + )); + }); + } + #[test] fn get_storage_size_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { assert_eq!( - ctx.ext.set_storage([1; 32], Some(vec![1, 2, 3]), false), + ctx.ext.set_storage(&[1; 32], Some(vec![1, 2, 3]), false), Ok(WriteOutcome::New) ); - assert_eq!(ctx.ext.set_storage([2; 32], Some(vec![]), false), Ok(WriteOutcome::New)); + assert_eq!(ctx.ext.set_storage(&[2; 32], Some(vec![]), false), Ok(WriteOutcome::New)); assert_eq!(ctx.ext.get_storage_size(&[1; 32]), Some(3)); assert_eq!(ctx.ext.get_storage_size(&[2; 32]), Some(0)); assert_eq!(ctx.ext.get_storage_size(&[3; 32]), None); @@ -2724,6 +2963,129 @@ mod tests { )); }); } + + #[test] + fn get_storage_transparent_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([1; 19].to_vec()).unwrap(), + Some(vec![1, 2, 3]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([2; 16].to_vec()).unwrap(), + Some(vec![]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.get_storage_transparent( + &VarSizedKey::::try_from([1; 19].to_vec()).unwrap() + ), + Some(vec![1, 2, 3]) + ); + assert_eq!( + ctx.ext.get_storage_transparent( + &VarSizedKey::::try_from([2; 16].to_vec()).unwrap() + ), + Some(vec![]) + ); + assert_eq!( + ctx.ext.get_storage_transparent( + &VarSizedKey::::try_from([3; 8].to_vec()).unwrap() + ), + None + ); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 1000); + place_contract(&BOB, code_hash); + let mut storage_meter = storage::meter::Meter::new(&ALICE, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + None, + )); + }); + } + + #[test] + fn get_storage_size_transparent_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([1; 19].to_vec()).unwrap(), + Some(vec![1, 2, 3]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from([2; 16].to_vec()).unwrap(), + Some(vec![]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.get_storage_size_transparent( + &VarSizedKey::::try_from([1; 19].to_vec()).unwrap() + ), + Some(3) + ); + assert_eq!( + ctx.ext.get_storage_size_transparent( + &VarSizedKey::::try_from([2; 16].to_vec()).unwrap() + ), + Some(0) + ); + assert_eq!( + ctx.ext.get_storage_size_transparent( + &VarSizedKey::::try_from([3; 8].to_vec()).unwrap() + ), + None + ); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let schedule = ::Schedule::get(); + let mut 
gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 1000); + place_contract(&BOB, code_hash); + let mut storage_meter = storage::meter::Meter::new(&ALICE, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + None, + )); + }); + } + #[test] fn ecdsa_to_eth_address_returns_proper_value() { let bob_ch = MockLoader::insert(Call, |ctx, _| { diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 1011e264eb930..60b30ffa25005 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -98,11 +98,6 @@ pub mod weights; #[cfg(test)] mod tests; -pub use crate::{ - exec::Frame, - pallet::*, - schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, -}; use crate::{ exec::{AccountIdOf, ExecError, Executable, Stack as ExecStack}, gas::GasMeter, @@ -129,6 +124,12 @@ use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; +pub use crate::{ + exec::{Frame, VarSizedKey as StorageKey}, + pallet::*, + schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, +}; + type CodeHash = ::Hash; type TrieId = BoundedVec>; type BalanceOf = @@ -372,6 +373,9 @@ pub mod pallet { /// new instrumentation increases the size beyond the limit it would make that contract /// inaccessible until rectified by another runtime upgrade. type RelaxedMaxCodeLen: Get; + + /// The maximum allowable length in bytes for storage keys. + type MaxStorageKeyLen: Get; } #[pallet::hooks] @@ -942,11 +946,14 @@ where } /// Query storage of a specified contract under a specified key. - pub fn get_storage(address: T::AccountId, key: [u8; 32]) -> GetStorageResult { + pub fn get_storage(address: T::AccountId, key: Vec) -> GetStorageResult { let contract_info = ContractInfoOf::::get(&address).ok_or(ContractAccessError::DoesntExist)?; - let maybe_value = Storage::::read(&contract_info.trie_id, &key); + let maybe_value = Storage::::read( + &contract_info.trie_id, + &StorageKey::::try_from(key).map_err(|_| ContractAccessError::KeyDecodingFailed)?, + ); Ok(maybe_value) } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index e73eb8d3bd55e..01c809da8675e 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -32,7 +32,7 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; -use sp_io::{hashing::blake2_256, KillStorageResult}; +use sp_io::KillStorageResult; use sp_runtime::{ traits::{Hash, Zero}, RuntimeDebug, @@ -124,16 +124,16 @@ where /// /// The read is performed from the `trie_id` only. The `address` is not necessary. If the /// contract doesn't store under the given `key` `None` is returned. - pub fn read(trie_id: &TrieId, key: &StorageKey) -> Option> { - child::get_raw(&child_trie_info(trie_id), &blake2_256(key)) + pub fn read>(trie_id: &TrieId, key: &K) -> Option> { + child::get_raw(&child_trie_info(trie_id), key.hash().as_slice()) } /// Returns `Some(len)` (in bytes) if a storage item exists at `key`. /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or /// was deleted. - pub fn size(trie_id: &TrieId, key: &StorageKey) -> Option { - child::len(&child_trie_info(trie_id), &blake2_256(key)) + pub fn size>(trie_id: &TrieId, key: &K) -> Option { + child::len(&child_trie_info(trie_id), key.hash().as_slice()) } /// Update a storage entry into a contract's kv storage. 
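The `read`, `size`, and `write` accessors here are now generic over the `StorageKey` trait introduced in the exec.rs hunk above, so the child-trie key depends on which impl is used. Below is a minimal illustrative sketch of the two hashing schemes (not part of the patch), assuming the `frame-support` and `sp-io` crates from this workspace are available; `Blake2_128Concat` is what makes the variable-sized scheme "transparent", since the plain key survives inside the hashed output:

    use frame_support::{Blake2_128Concat, StorageHasher};
    use sp_io::hashing::blake2_256;

    fn main() {
        // Fixed 32-byte keys keep the old opaque scheme: only the Blake2-256
        // digest goes into the child trie, so the original key cannot be
        // recovered from the trie key.
        let fix_key = [1u8; 32];
        let opaque = blake2_256(&fix_key);
        assert_eq!(opaque.len(), 32);

        // Variable-sized keys use Blake2_128Concat: 16 digest bytes followed
        // by the key itself, so the plain key can be read back out of the
        // child-trie key.
        let var_key = b"my.storage.key".to_vec();
        let transparent = Blake2_128Concat::hash(&var_key);
        assert_eq!(transparent.len(), 16 + var_key.len());
        assert_eq!(&transparent[16..], &var_key[..]);
    }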
@@ -143,15 +143,15 @@ where /// /// This function also records how much storage was created or removed if a `storage_meter` /// is supplied. It should only be absent for testing or benchmarking code. - pub fn write( + pub fn write>( trie_id: &TrieId, - key: &StorageKey, + key: &K, new_value: Option>, storage_meter: Option<&mut meter::NestedMeter>, take: bool, ) -> Result { - let hashed_key = blake2_256(key); let child_trie_info = &child_trie_info(trie_id); + let hashed_key = key.hash(); let (old_len, old_value) = if take { let val = child::get_raw(child_trie_info, &hashed_key); (val.as_ref().map(|v| v.len() as u32), val) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index d4a5434cd5a46..bbac18142a658 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -20,7 +20,7 @@ use crate::{ ChainExtension, Environment, Ext, InitState, Result as ExtensionResult, RetVal, ReturnFlags, SysConfig, UncheckedFrom, }, - exec::Frame, + exec::{FixSizedKey, Frame}, storage::Storage, wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, @@ -291,6 +291,7 @@ impl Config for Test { type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>; + type MaxStorageKeyLen = ConstU32<128>; } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); @@ -1723,8 +1724,14 @@ fn lazy_removal_partial_remove_works() { // Put value into the contracts child trie for val in &vals { - Storage::::write(&info.trie_id, &val.0, Some(val.2.clone()), None, false) - .unwrap(); + Storage::::write( + &info.trie_id, + &val.0 as &FixSizedKey, + Some(val.2.clone()), + None, + false, + ) + .unwrap(); } >::insert(&addr, info.clone()); @@ -1903,8 +1910,14 @@ fn lazy_removal_does_not_use_all_weight() { // Put value into the contracts child trie for val in &vals { - Storage::::write(&info.trie_id, &val.0, Some(val.2.clone()), None, false) - .unwrap(); + Storage::::write( + &info.trie_id, + &val.0 as &FixSizedKey, + Some(val.2.clone()), + None, + false, + ) + .unwrap(); } >::insert(&addr, info.clone()); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index c385b5d2f7cc2..3dd5da187b258 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -275,7 +275,8 @@ mod tests { use super::*; use crate::{ exec::{ - AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, SeedOf, StorageKey, + AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, FixSizedKey, + SeedOf, VarSizedKey, }, gas::GasMeter, storage::WriteOutcome, @@ -330,7 +331,7 @@ mod tests { } pub struct MockExt { - storage: HashMap>, + storage: HashMap, Vec>, instantiates: Vec, terminations: Vec, calls: Vec, @@ -425,19 +426,45 @@ mod tests { self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone() }); Ok(()) } - fn get_storage(&mut self, key: &StorageKey) -> Option> { - self.storage.get(key).cloned() + fn get_storage(&mut self, key: &FixSizedKey) -> Option> { + self.storage.get(&key.to_vec()).cloned() } - fn get_storage_size(&mut self, key: &StorageKey) -> Option { - self.storage.get(key).map(|val| val.len() as u32) + fn get_storage_transparent(&mut self, key: &VarSizedKey) -> Option> { + self.storage.get(&key.to_vec()).cloned() + } + fn get_storage_size(&mut self, key: &FixSizedKey) -> Option { + self.storage.get(&key.to_vec()).map(|val| val.len() as u32) + } + fn get_storage_size_transparent(&mut self, key: &VarSizedKey) -> 
Option {
		self.storage.get(&key.to_vec()).map(|val| val.len() as u32)
	}
	fn set_storage(
		&mut self,
-		key: StorageKey,
+		key: &FixSizedKey,
		value: Option>,
		take_old: bool,
	) -> Result {
-		let entry = self.storage.entry(key);
+		let key = key.to_vec();
+		let entry = self.storage.entry(key.clone());
+		let result = match (entry, take_old) {
+			(Entry::Vacant(_), _) => WriteOutcome::New,
+			(Entry::Occupied(entry), false) =>
+				WriteOutcome::Overwritten(entry.remove().len() as u32),
+			(Entry::Occupied(entry), true) => WriteOutcome::Taken(entry.remove()),
+		};
+		if let Some(value) = value {
+			self.storage.insert(key, value);
+		}
+		Ok(result)
+	}
+	fn set_storage_transparent(
+		&mut self,
+		key: &VarSizedKey,
+		value: Option>,
+		take_old: bool,
+	) -> Result {
+		let key = key.to_vec();
+		let entry = self.storage.entry(key.clone());
		let result = match (entry, take_old) {
			(Entry::Vacant(_), _) => WriteOutcome::New,
			(Entry::Occupied(entry), false) =>
@@ -836,6 +863,7 @@ mod tests {
	}

	#[test]
+	#[cfg(not(feature = "unstable-interface"))]
	fn contains_storage_works() {
		const CODE: &str = r#"
(module
	(import "seal0" "seal_return" (func $seal_return (param i32 i32 i32)))
	(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
	(import "seal0" "seal_contains_storage" (func $seal_contains_storage (param i32) (result i32)))
	(import "env" "memory" (memory 1 1))

-	;; [0, 4) size of input buffer (32 byte as we copy the key here)
+	;; [0, 4) size of input buffer (32 bytes as we copy the key here)
	(data (i32.const 0) "\20")

	;; [4, 36) input buffer
-
	;; [36, inf) output buffer

	(func (export "call")
@@ -879,8 +906,8 @@

		let mut ext = MockExt::default();

-		ext.storage.insert([1u8; 32], vec![42u8]);
-		ext.storage.insert([2u8; 32], vec![]);
+		ext.storage.insert(vec![1u8; 32], vec![42u8]);
+		ext.storage.insert(vec![2u8; 32], vec![]);

		// value does not exist -> sentinel value returned
		let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap();
@@ -895,6 +922,84 @@
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0,);
	}
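The unstable fixtures below no longer pass a bare 32-byte key; they pass a SCALE-encoded `(key_len, key)` tuple, which is why the wat code loads `key_len` from the start of the input buffer and passes a `key_ptr` 4 bytes further on. A small illustrative sketch of that input layout (not part of the patch), assuming the `parity-scale-codec` crate, imported as `codec` in this tree:

    use codec::Encode;

    fn main() {
        // A `u32` encodes to 4 little-endian bytes and a fixed-size byte
        // array encodes to its raw bytes, so the tuple is laid out on the
        // wire as `key_len ++ key`.
        let input = (64u32, [1u8; 64]).encode();
        assert_eq!(input.len(), 4 + 64);
        assert_eq!(&input[..4], &64u32.to_le_bytes());
        assert!(input[4..].iter().all(|&b| b == 1));
    }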
+	#[test]
+	#[cfg(feature = "unstable-interface")]
+	fn contains_storage_works() {
+		const CODE: &str = r#"
+(module
+	(import "seal0" "seal_return" (func $seal_return (param i32 i32 i32)))
+	(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
+	(import "__unstable__" "seal_contains_storage" (func $seal_contains_storage (param i32 i32) (result i32)))
+	(import "env" "memory" (memory 1 1))
+
+
+	;; size of input buffer
+	;; [0, 4) size of input buffer (128+32 = 160 bytes = 0xA0)
+	(data (i32.const 0) "\A0")
+
+	;; [4, 164) input buffer
+
+	(func (export "call")
+		;; Receive key
+		(call $seal_input
+			(i32.const 4)	;; Where we take input and store it
+			(i32.const 0)	;; Where we take and store the length of the data
+		)
+		;; Call seal_contains_storage and save what it returns at 0
+		(i32.store (i32.const 0)
+			(call $seal_contains_storage
+				(i32.const 8)			;; key_ptr
+				(i32.load (i32.const 4))	;; key_len
+			)
+		)
+		(call $seal_return
+			(i32.const 0)	;; flags
+			(i32.const 0)	;; returned value
+			(i32.const 4)	;; length of returned value
+		)
+	)
+
+	(func (export "deploy"))
+)
+"#;
+
+		let mut ext = MockExt::default();
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(),
+			Some(vec![42u8]),
+			false,
+		)
+		.unwrap();
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(),
+			Some(vec![]),
+			false,
+		)
+		.unwrap();
+
+		// value does not exist (wrong key length)
+		let input = (63, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// sentinel returned
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL);
+
+		// value exists
+		let input = (64, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// size of the value returned
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1);
+		// getter does not remove the value from storage
+		assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]);
+
+		// value exists (test for 0 sized)
+		let input = (19, [2u8; 19]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// size of the value returned (0 for an empty value)
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0);
+		// getter does not remove the value from storage
+		assert_eq!(ext.storage.get(&[2u8; 19].to_vec()).unwrap(), &([] as [u8; 0]));
+	}
+
	const CODE_INSTANTIATE: &str = r#"
(module
	;; seal_instantiate(
@@ -1204,7 +1309,7 @@ mod tests {
	#[test]
	fn get_storage_puts_data_into_buf() {
		let mut mock_ext = MockExt::default();
-		mock_ext.storage.insert([0x11; 32], [0x22; 32].to_vec());
+		mock_ext.storage.insert([0x11; 32].to_vec(), [0x22; 32].to_vec());

		let output = execute(CODE_GET_STORAGE, vec![], mock_ext).unwrap();

@@ -2176,6 +2281,7 @@ mod tests {
	}

	#[test]
+	#[cfg(not(feature = "unstable-interface"))]
	fn set_storage_works() {
		const CODE: &str = r#"
(module
@@ -2199,7 +2305,7 @@
			(call $seal_set_storage
				(i32.const 4)		;; key_ptr
				(i32.const 36)		;; value_ptr
-				(i32.sub		;; value_len (input_size - key_size)
+				(i32.sub			;; value_len (input_size - key_size)
					(i32.load (i32.const 0))
					(i32.const 32)
				)
			)
		)
		(call $seal_return
			(i32.const 0)	;; flags
-			(i32.const 0)	;; returned value
+			(i32.const 0)	;; pointer to returned value
			(i32.const 4)	;; length of returned value
		)
	)
@@ -2222,45 +2328,213 @@
		let input = ([1u8; 32], [42u8, 48]).encode();
		let result = execute(CODE, input, &mut ext).unwrap();
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL);
-		assert_eq!(ext.storage.get(&[1u8; 32]).unwrap(), &[42u8, 48]);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]);

		// value does exist -> length of old value returned
		let input = ([1u8; 32], [0u8; 0]).encode();
		let result = execute(CODE, input, &mut ext).unwrap();
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2);
-		assert_eq!(ext.storage.get(&[1u8; 32]).unwrap(), &[0u8; 0]);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]);

		// value does exist -> length of old value returned (test for zero sized val)
		let input = ([1u8; 32], [99u8]).encode();
		let result = execute(CODE, input, &mut ext).unwrap();
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0);
-		assert_eq!(ext.storage.get(&[1u8; 32]).unwrap(), &[99u8]);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]);
	}

	#[test]
	#[cfg(feature = "unstable-interface")]
-	fn clear_storage_works() {
+	fn set_storage_works() {
		const CODE: &str = r#"
(module
	(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
	(import "seal0" "seal_return" (func $seal_return (param i32 i32 i32)))
-	(import "__unstable__" "seal_clear_storage" (func $seal_clear_storage (param i32) (result i32)))
+	(import "__unstable__" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32 i32) (result i32)))
	(import "env" "memory" (memory 1 1))

-	;; 0x1000 = 4k in little endian
-	;; size of input buffer
+	;; [0, 4) size of input buffer
+	;; 4k in little endian
	(data (i32.const 0) "\00\10")

+	;; [4, 4100) input buffer
+
	(func
(export "call")
-		;; Receive key
+		;; Receive (key ++ value_to_write)
		(call $seal_input
			(i32.const 4)	;; Pointer to the input buffer
-			(i32.const 0)	;; Size of the length buffer
+			(i32.const 0)	;; Size of the input buffer
		)
		;; Store the passed value to the passed key and store result to memory
+		(i32.store (i32.const 168)
+			(call $seal_set_storage
+				(i32.const 8)				;; key_ptr
+				(i32.load (i32.const 4))		;; key_len
+				(i32.add				;; value_ptr = 8 + key_len
+					(i32.const 8)
+					(i32.load (i32.const 4)))
+				(i32.sub				;; value_len (input_size - (key_len + key_len_len))
+					(i32.load (i32.const 0))
+					(i32.add
+						(i32.load (i32.const 4))
+						(i32.const 4)
+					)
+				)
+			)
+		)
+		(call $seal_return
+			(i32.const 0)	;; flags
+			(i32.const 168)	;; ptr to returned value
+			(i32.const 4)	;; length of returned value
+		)
+	)
+
+	(func (export "deploy"))
+)
+"#;
+
+		let mut ext = MockExt::default();
+
+		// value did not exist before -> sentinel returned
+		let input = (32, [1u8; 32], [42u8, 48]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]);
+
+		// value does exist -> length of old value returned
+		let input = (32, [1u8; 32], [0u8; 0]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]);
+
+		// value does exist -> length of old value returned (test for zero sized val)
+		let input = (32, [1u8; 32], [99u8]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0);
+		assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]);
+	}
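The tests around this point all decode the returned `u32` against `crate::SENTINEL`. A short sketch of that convention (illustrative only), under the assumption, which holds for this pallet, that `SENTINEL` is `u32::MAX` and is therefore never a valid value length:

    // SENTINEL stands in for `None` in host functions that can only return
    // a u32; any smaller number is the byte length of the previous value.
    const SENTINEL: u32 = u32::MAX;

    fn decode_old_len(ret: u32) -> Option<u32> {
        if ret == SENTINEL {
            None // no value existed under this key before
        } else {
            Some(ret) // length in bytes of the overwritten/cleared value
        }
    }

    fn main() {
        assert_eq!(decode_old_len(u32::MAX), None);
        assert_eq!(decode_old_len(2), Some(2));
    }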
+	#[test]
+	#[cfg(feature = "unstable-interface")]
+	fn get_storage_works() {
+		const CODE: &str = r#"
+(module
+	(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
+	(import "seal0" "seal_return" (func $seal_return (param i32 i32 i32)))
+	(import "__unstable__" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32 i32) (result i32)))
+	(import "env" "memory" (memory 1 1))
+
+	;; [0, 4) size of input buffer (160 bytes as we copy the key+len here)
+	(data (i32.const 0) "\A0")
+
+	;; [4, 8) size of output buffer
+	;; 4k in little endian
+	(data (i32.const 4) "\00\10")
+
+	;; [8, 168) input buffer
+	;; [168, 4264) output buffer
+
+	(func (export "call")
+		;; Receive (key_len ++ key)
+		(call $seal_input
+			(i32.const 8)	;; Pointer to the input buffer
+			(i32.const 0)	;; Size of the input buffer
+		)
+		;; Load a storage value and result of this call into the output buffer
+		(i32.store (i32.const 168)
+			(call $seal_get_storage
+				(i32.const 12)			;; key_ptr
+				(i32.load (i32.const 8))	;; key_len
+				(i32.const 172)			;; Pointer to the output buffer
+				(i32.const 4)			;; Pointer to the size of the buffer
+			)
+		)
+		(call $seal_return
+			(i32.const 0)	;; flags
+			(i32.const 168)	;; output buffer ptr
+			(i32.add	;; length: output size + 4 (retval)
+				(i32.load (i32.const 4))
+				(i32.const 4)
+			)
+		)
+	)
+
+	(func (export "deploy"))
+)
+"#;
+
+		let mut ext = MockExt::default();
+
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(),
+			Some(vec![42u8]),
+			false,
+		)
+		.unwrap();
+
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(),
+			Some(vec![]),
+			false,
+		)
+		.unwrap();
+
+		// value does not exist
+		let input = (63, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(
+			u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()),
+			ReturnCode::KeyNotFound as u32
+		);
+
+		// value exists
+		let input = (64, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(
+			u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()),
+			ReturnCode::Success as u32
+		);
+		assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]);
+		assert_eq!(&result.data.0[4..], &[42u8]);
+
+		// value exists (test for 0 sized)
+		let input = (19, [2u8; 19]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		assert_eq!(
+			u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()),
+			ReturnCode::Success as u32
+		);
+		assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![]));
+		assert_eq!(&result.data.0[4..], &([] as [u8; 0]));
+	}
+
	#[test]
	#[cfg(feature = "unstable-interface")]
	fn clear_storage_works() {
		const CODE: &str = r#"
(module
	(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
	(import "seal0" "seal_return" (func $seal_return (param i32 i32 i32)))
-	(import "__unstable__" "seal_clear_storage" (func $seal_clear_storage (param i32) (result i32)))
+	(import "__unstable__" "seal_clear_storage" (func $seal_clear_storage (param i32 i32) (result i32)))
	(import "env" "memory" (memory 1 1))

+	;; size of input buffer
+	;; [0, 4) size of input buffer (128+32 = 160 bytes = 0xA0)
+	(data (i32.const 0) "\A0")
+
+	;; [4, 164) input buffer
+
+	(func (export "call")
+		;; Receive key
+		(call $seal_input
+			(i32.const 4)	;; Where we take input and store it
+			(i32.const 0)	;; Where we take and store the length of the data
+		)
+		;; Call seal_clear_storage and save what it returns at 0
		(i32.store (i32.const 0)
			(call $seal_clear_storage
-				(i32.const 4)			;; key_ptr
+				(i32.const 8)			;; key_ptr
+				(i32.load (i32.const 4))	;; key_len
			)
		)
		(call $seal_return
@@ -2276,23 +2550,48 @@

		let mut ext = MockExt::default();

-		ext.storage.insert([1u8; 32], vec![42u8]);
-		ext.storage.insert([2u8; 32], vec![]);
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(),
+			Some(vec![42u8]),
+			false,
+		)
+		.unwrap();
+		ext.set_storage_transparent(
+			&VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(),
+			Some(vec![]),
+			false,
+		)
+		.unwrap();

-		// value does not exist -> sentinel returned
-		let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap();
+		// value did not exist
+		let input = (32, [3u8; 32]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// sentinel returned
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL);
-		assert_eq!(ext.storage.get(&[3u8; 32]), None);
+		assert_eq!(ext.storage.get(&[3u8; 32].to_vec()), None);

-		// value did exist -> length returned
-		let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap();
+		// value did exist
+		let input = (64, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// length returned
		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1);
+		// value cleared
		assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None);

-		// value did exist -> length returned (test for 0 sized)
-		let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap();
+		// value did not exist (wrong key length)
+		let input = (63, [1u8; 64]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
+		// sentinel returned
+		assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL);
+
assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); + + // value exists + let input = (19, [2u8; 19]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + // length returned (test for 0 sized) assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); - assert_eq!(ext.storage.get(&[2u8; 32]), None); + // value cleared + assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); } #[test] @@ -2302,42 +2601,42 @@ mod tests { (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "__unstable__" "seal_take_storage" (func $seal_take_storage (param i32 i32 i32) (result i32))) + (import "__unstable__" "seal_take_storage" (func $seal_take_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 32) size of input buffer (32 byte as we copy the key here) - (data (i32.const 0) "\20") + ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) + (data (i32.const 0) "\A0") - ;; [32, 64) size of output buffer + ;; [4, 8) size of output buffer ;; 4k in little endian - (data (i32.const 32) "\00\10") - - ;; [64, 96) input buffer + (data (i32.const 4) "\00\10") - ;; [96, inf) output buffer + ;; [8, 168) input buffer + ;; [168, 4264) output buffer (func (export "call") ;; Receive key (call $seal_input - (i32.const 64) ;; Pointer to the input buffer + (i32.const 8) ;; Pointer to the input buffer (i32.const 0) ;; Size of the length buffer ) ;; Load a storage value and result of this call into the output buffer - (i32.store (i32.const 96) + (i32.store (i32.const 168) (call $seal_take_storage - (i32.const 64) ;; The pointer to the storage key to fetch - (i32.const 100) ;; Pointer to the output buffer - (i32.const 32) ;; Pointer to the size of the buffer + (i32.const 12) ;; key_ptr + (i32.load (i32.const 8)) ;; key_len + (i32.const 172) ;; Pointer to the output buffer + (i32.const 4) ;; Pointer to the size of the buffer ) ) ;; Return the contents of the buffer (call $seal_return - (i32.const 0) ;; flags - (i32.const 96) ;; output buffer ptr - (i32.add ;; length: storage size + 4 (retval) - (i32.load (i32.const 32)) + (i32.const 0) ;; flags + (i32.const 168) ;; output buffer ptr + (i32.add ;; length: storage size + 4 (retval) + (i32.load (i32.const 4)) (i32.const 4) ) ) @@ -2349,32 +2648,46 @@ mod tests { let mut ext = MockExt::default(); - ext.storage.insert([1u8; 32], vec![42u8]); - ext.storage.insert([2u8; 32], vec![]); + ext.set_storage_transparent( + &VarSizedKey::::try_from([1u8; 64].to_vec()).unwrap(), + Some(vec![42u8]), + false, + ) + .unwrap(); + + ext.set_storage_transparent( + &VarSizedKey::::try_from([2u8; 19].to_vec()).unwrap(), + Some(vec![]), + false, + ) + .unwrap(); // value does not exist -> error returned - let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); + let input = (63, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); // value did exist -> value returned - let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); + let input = (64, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); - assert_eq!(ext.storage.get(&[1u8; 32]), None); + assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); assert_eq!(&result.data.0[4..], &[42u8]); // 
-		let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap();
+		let input = (19, [2u8; 19]).encode();
+		let result = execute(CODE, input, &mut ext).unwrap();
 		assert_eq!(
 			u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()),
 			ReturnCode::Success as u32
 		);
-		assert_eq!(ext.storage.get(&[2u8; 32]), None);
+		assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None);
 		assert_eq!(&result.data.0[4..], &[0u8; 0]);
 	}
 
diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs
index 850794fc9b09e..c1757ba06412a 100644
--- a/frame/contracts/src/wasm/runtime.rs
+++ b/frame/contracts/src/wasm/runtime.rs
@@ -18,15 +18,16 @@
 //! Environment definition of the wasm smart-contract runtime.
 
 use crate::{
-	exec::{ExecError, ExecResult, Ext, StorageKey, TopicOf},
+	exec::{ExecError, ExecResult, Ext, FixSizedKey, TopicOf, VarSizedKey},
 	gas::{ChargedAmount, Token},
 	schedule::HostFnWeights,
 	wasm::env_def::ConvertibleToWasm,
 	BalanceOf, CodeHash, Config, Error, SENTINEL,
 };
+
 use bitflags::bitflags;
 use codec::{Decode, DecodeAll, Encode, MaxEncodedLen};
-use frame_support::{dispatch::DispatchError, ensure, weights::Weight};
+use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight};
 use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags};
 use sp_core::{crypto::UncheckedFrom, Bytes};
 use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256};
@@ -35,6 +36,28 @@
 use sp_sandbox::SandboxMemory;
 use sp_std::prelude::*;
 use wasm_instrument::parity_wasm::elements::ValueType;
 
+/// Type of a storage key.
+#[allow(dead_code)]
+enum KeyType {
+	/// Deprecated fixed-size key `[0; 32]`.
+	Fix,
+	/// Variable-size key used with transparent hashing;
+	/// cannot be larger than `MaxStorageKeyLen`.
+	Variable(u32),
+}
+
+impl KeyType {
+	fn len<T: Config>(&self) -> Result<u32, TrapReason> {
+		match self {
+			KeyType::Fix => Ok(32u32),
+			KeyType::Variable(len) => {
+				ensure!(len <= &<T>::MaxStorageKeyLen::get(), Error::<T>::DecodingFailed);
+				Ok(*len)
+			},
+		}
+	}
+}
+
 /// Every error that can be returned to a contract when it calls any of the host functions.
///
/// # Note
@@ -695,6 +718,7 @@ where
 	fn set_storage(
 		&mut self,
+		key_type: KeyType,
 		key_ptr: u32,
 		value_ptr: u32,
 		value_len: u32,
@@ -705,10 +729,21 @@ where
 		if value_len > max_size {
 			return Err(Error::<E::T>::ValueTooLarge.into())
 		}
-		let mut key: StorageKey = [0; 32];
-		self.read_sandbox_memory_into_buf(key_ptr, &mut key)?;
+		let key = self.read_sandbox_memory(key_ptr, key_type.len::<E::T>()?)?;
 		let value = Some(self.read_sandbox_memory(value_ptr, value_len)?);
-		let write_outcome = self.ext.set_storage(key, value, false)?;
+		let write_outcome = match key_type {
+			KeyType::Fix => self.ext.set_storage(
+				&FixSizedKey::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+				value,
+				false,
+			)?,
+			KeyType::Variable(_) => self.ext.set_storage_transparent(
+				&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+				value,
+				false,
+			)?,
+		};
+
 		self.adjust_gas(
 			charged,
 			RuntimeCosts::SetStorage { new_bytes: value_len, old_bytes: write_outcome.old_len() },
@@ -716,15 +751,70 @@ where
 		Ok(write_outcome.old_len_with_sentinel())
 	}
 
-	fn clear_storage(&mut self, key_ptr: u32) -> Result<u32, TrapReason> {
+	fn clear_storage(&mut self, key_type: KeyType, key_ptr: u32) -> Result<u32, TrapReason> {
 		let charged = self.charge_gas(RuntimeCosts::ClearStorage(self.ext.max_value_size()))?;
-		let mut key: StorageKey = [0; 32];
-		self.read_sandbox_memory_into_buf(key_ptr, &mut key)?;
-		let outcome = self.ext.set_storage(key, None, false)?;
+		let key = self.read_sandbox_memory(key_ptr, key_type.len::<E::T>()?)?;
+		let outcome = match key_type {
+			KeyType::Fix => self.ext.set_storage(
+				&FixSizedKey::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+				None,
+				false,
+			)?,
+			KeyType::Variable(_) => self.ext.set_storage_transparent(
+				&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+				None,
+				false,
+			)?,
+		};
+
 		self.adjust_gas(charged, RuntimeCosts::ClearStorage(outcome.old_len()));
 		Ok(outcome.old_len_with_sentinel())
 	}
 
+	fn get_storage(
+		&mut self,
+		key_type: KeyType,
+		key_ptr: u32,
+		out_ptr: u32,
+		out_len_ptr: u32,
+	) -> Result<ReturnCode, TrapReason> {
+		let charged = self.charge_gas(RuntimeCosts::GetStorage(self.ext.max_value_size()))?;
+		let key = self.read_sandbox_memory(key_ptr, key_type.len::<E::T>()?)?;
+		let outcome = match key_type {
+			KeyType::Fix => self.ext.get_storage(
+				&FixSizedKey::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+			),
+			KeyType::Variable(_) => self.ext.get_storage_transparent(
+				&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+			),
+		};
+
+		if let Some(value) = outcome {
+			self.adjust_gas(charged, RuntimeCosts::GetStorage(value.len() as u32));
+			self.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?;
+			Ok(ReturnCode::Success)
+		} else {
+			self.adjust_gas(charged, RuntimeCosts::GetStorage(0));
+			Ok(ReturnCode::KeyNotFound)
+		}
+	}
+
+	fn contains_storage(&mut self, key_type: KeyType, key_ptr: u32) -> Result<u32, TrapReason> {
+		let charged = self.charge_gas(RuntimeCosts::ContainsStorage(self.ext.max_value_size()))?;
+		let key = self.read_sandbox_memory(key_ptr, key_type.len::<E::T>()?)?;
+		let outcome = match key_type {
+			KeyType::Fix => self.ext.get_storage_size(
+				&FixSizedKey::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+			),
+			KeyType::Variable(_) => self.ext.get_storage_size_transparent(
+				&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?,
+			),
+		};
+
+		self.adjust_gas(charged, RuntimeCosts::ContainsStorage(outcome.unwrap_or(0)));
+		Ok(outcome.unwrap_or(SENTINEL))
+	}
+
 	fn call(
 		&mut self,
 		flags: CallFlags,
@@ -863,11 +953,14 @@ define_env!(Env, <E: Ext>,
 	// Equivalent 
to the newer version of `seal_set_storage` with the exception of the return // type. Still a valid thing to call when not interested in the return value. [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { - ctx.set_storage(key_ptr, value_ptr, value_len).map(|_| ()) + ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) }, // Set the value at the given key in the contract storage. // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // // The value length must not exceed the maximum defined by the contracts module parameters. // Specifying a `value_len` of zero will store an empty value. // @@ -882,7 +975,27 @@ define_env!(Env, , // Returns the size of the pre-existing value at the specified key if any. Otherwise // `SENTINEL` is returned as a sentinel value. [seal1] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) -> u32 => { - ctx.set_storage(key_ptr, value_ptr, value_len) + ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len) + }, + + // Set the value at the given key in the contract storage. + // + // The key and value lengths must not exceed the maximums defined by the contracts module parameters. + // Specifying a `value_len` of zero will store an empty value. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + // - `key_len`: the length of the key in bytes. + // - `value_ptr`: pointer into the linear memory where the value to set is placed. + // - `value_len`: the length of the value in bytes. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [__unstable__] seal_set_storage(ctx, key_ptr: u32, key_len: u32, value_ptr: u32, value_len: u32) -> u32 => { + ctx.set_storage(KeyType::Variable(key_len), key_ptr, value_ptr, value_len) }, // Clear the value at the given key in the contract storage. @@ -890,25 +1003,29 @@ define_env!(Env, , // Equivalent to the newer version of `seal_clear_storage` with the exception of the return // type. Still a valid thing to call when not interested in the return value. [seal0] seal_clear_storage(ctx, key_ptr: u32) => { - ctx.clear_storage(key_ptr).map(|_| ()).map_err(Into::into) + ctx.clear_storage(KeyType::Fix, key_ptr).map(|_| ()) }, // Clear the value at the given key in the contract storage. // // # Parameters // - // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. + // - `key_ptr`: pointer into the linear memory where the key is placed. + // - `key_len`: the length of the key in bytes. // // # Return Value // // Returns the size of the pre-existing value at the specified key if any. Otherwise // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_clear_storage(ctx, key_ptr: u32) -> u32 => { - ctx.clear_storage(key_ptr).map_err(Into::into) + [__unstable__] seal_clear_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { + ctx.clear_storage(KeyType::Variable(key_len), key_ptr) }, // Retrieve the value under the given key from storage. // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // // # Parameters // // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. 
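For orientation, and not part of the diff itself: the variable-key host functions above take a `(key_ptr, key_len)` pair instead of a fixed 32-byte key, and the new tests feed them a SCALE-encoded `(key_len, key)` tuple. A minimal sketch of that input layout, assuming only the `Encode` trait from `codec` (parity-scale-codec); the layout shown mirrors the test fixtures, everything else here is illustrative:

use codec::Encode; // parity-scale-codec

fn main() {
	// A u32 SCALE-encodes to 4 little-endian bytes and a [u8; N] array to
	// its raw bytes, so (64u32, [1u8; 64]).encode() yields 4 + 64 bytes.
	let input = (64u32, [1u8; 64]).encode();
	assert_eq!(&input[..4], &64u32.to_le_bytes());
	assert_eq!(input.len(), 4 + 64);
	// A contract forwards key_ptr (where the raw key bytes sit) and
	// key_len (the decoded u32) to the __unstable__ storage functions.
}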
@@ -920,21 +1037,36 @@ define_env!(Env, <E: Ext>,
 	//
 	// `ReturnCode::KeyNotFound`
 	[seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
-		let charged = ctx.charge_gas(RuntimeCosts::GetStorage(ctx.ext.max_value_size()))?;
-		let mut key: StorageKey = [0; 32];
-		ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?;
-		if let Some(value) = ctx.ext.get_storage(&key) {
-			ctx.adjust_gas(charged, RuntimeCosts::GetStorage(value.len() as u32));
-			ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?;
-			Ok(ReturnCode::Success)
-		} else {
-			ctx.adjust_gas(charged, RuntimeCosts::GetStorage(0));
-			Ok(ReturnCode::KeyNotFound)
-		}
+		ctx.get_storage(KeyType::Fix, key_ptr, out_ptr, out_len_ptr)
+	},
+
+	// Retrieve the value under the given key from storage.
+	//
+	// This version is to be used with a variable-sized storage key, as used by runtimes
+	// supporting transparent hashing.
+	//
+	// The key length must not exceed the maximum defined by the contracts module parameter.
+	//
+	// # Parameters
+	//
+	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
+	// - `key_len`: the length of the key in bytes.
+	// - `out_ptr`: pointer to the linear memory where the value is written to.
+	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
+	//   is read from and the value length is written to.
+	//
+	// # Errors
+	//
+	// `ReturnCode::KeyNotFound`
+	[__unstable__] seal_get_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
+		ctx.get_storage(KeyType::Variable(key_len), key_ptr, out_ptr, out_len_ptr)
 	},
 
 	// Checks whether there is a value stored under the given key.
 	//
+	// This version is to be used with a fixed-sized storage key. For runtimes supporting transparent
+	// hashing, please use the newer version of this function.
+	//
 	// # Parameters
 	//
 	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
@@ -944,16 +1076,24 @@ define_env!(Env, <E: Ext>,
 	// Returns the size of the pre-existing value at the specified key if any. Otherwise
 	// `SENTINEL` is returned as a sentinel value.
 	[seal0] seal_contains_storage(ctx, key_ptr: u32) -> u32 => {
-		let charged = ctx.charge_gas(RuntimeCosts::ContainsStorage(ctx.ext.max_value_size()))?;
-		let mut key: StorageKey = [0; 32];
-		ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?;
-		if let Some(len) = ctx.ext.get_storage_size(&key) {
-			ctx.adjust_gas(charged, RuntimeCosts::ContainsStorage(len));
-			Ok(len)
-		} else {
-			ctx.adjust_gas(charged, RuntimeCosts::ContainsStorage(0));
-			Ok(SENTINEL)
-		}
+		ctx.contains_storage(KeyType::Fix, key_ptr)
+	},
+
+	// Checks whether there is a value stored under the given key.
+	//
+	// The key length must not exceed the maximum defined by the contracts module parameter.
+	//
+	// # Parameters
+	//
+	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
+	// - `key_len`: the length of the key in bytes.
+	//
+	// # Return Value
+	//
+	// Returns the size of the pre-existing value at the specified key if any. Otherwise
+	// `SENTINEL` is returned as a sentinel value.
+	[__unstable__] seal_contains_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => {
+		ctx.contains_storage(KeyType::Variable(key_len), key_ptr)
 	},
 
 	// Retrieve and remove the value under the given key from storage.
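A small sketch, also not part of the diff, of the sentinel convention shared by `seal_clear_storage` and `seal_contains_storage`; it assumes `SENTINEL` is the crate's `u32::MAX` marker, and the helper name is hypothetical:

/// Stand-in for pallet-contracts' `SENTINEL` (`u32::MAX`).
const SENTINEL: u32 = u32::MAX;

/// Interpret the returned u32: SENTINEL means the key was absent, any other
/// value is the length of the pre-existing value, so a previously stored
/// empty value comes back as Some(0) rather than None.
fn old_len(ret: u32) -> Option<u32> {
	if ret == SENTINEL { None } else { Some(ret) }
}

fn main() {
	assert_eq!(old_len(SENTINEL), None);
	assert_eq!(old_len(0), Some(0)); // an empty value existed
}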
@@ -961,6 +1101,7 @@ define_env!(Env, <E: Ext>,
 	// # Parameters
 	//
 	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
+	// - `key_len`: the length of the key in bytes.
 	// - `out_ptr`: pointer to the linear memory where the value is written to.
 	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
 	//   is read from and the value length is written to.
@@ -968,11 +1109,10 @@ define_env!(Env, <E: Ext>,
 	// # Errors
 	//
 	// `ReturnCode::KeyNotFound`
-	[__unstable__] seal_take_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
+	[__unstable__] seal_take_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
 		let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?;
-		let mut key: StorageKey = [0; 32];
-		ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?;
-		if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage(key, None, true)? {
+		let key = ctx.read_sandbox_memory(key_ptr, key_len)?;
+		if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(&VarSizedKey::<E::T>::try_from(key).map_err(|_| Error::<E::T>::DecodingFailed)?, None, true)? {
 			ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32));
 			ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?;
 			Ok(ReturnCode::Success)
diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs
index b18c259ebd433..3c90579e65d53 100644
--- a/frame/contracts/src/weights.rs
+++ b/frame/contracts/src/weights.rs
@@ -18,11 +18,12 @@
 //! Autogenerated weights for pallet_contracts
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2022-04-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2022-06-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
 //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/substrate
+// target/production/substrate
 // benchmark
 // pallet
 // --chain=dev
@@ -32,8 +33,9 @@
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --template=./.maintain/frame-weight-template.hbs
+// --heap-pages=4096
 // --output=./frame/contracts/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -164,44 +166,47 @@ pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	// Storage: Contracts DeletionQueue (r:1 w:0)
 	fn on_process_deletion_queue_batch() -> Weight {
-		(1_664_000 as Weight)
+		(1_654_000 as Weight)
 			.saturating_add(T::DbWeight::get().reads(1 as Weight))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
+	/// The range of component `k` is `[0, 1024]`.
 	fn on_initialize_per_trie_key(k: u32, ) -> Weight {
-		(11_878_000 as Weight)
+		(8_564_000 as Weight)
 			// Standard Error: 0
-			.saturating_add((758_000 as Weight).saturating_mul(k as Weight))
+			.saturating_add((868_000 as Weight).saturating_mul(k as Weight))
 			.saturating_add(T::DbWeight::get().reads(1 as Weight))
 			.saturating_add(T::DbWeight::get().writes(1 as Weight))
 			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight)))
 	}
 	// Storage: Contracts DeletionQueue (r:1 w:0)
+	/// The range of component `q` is `[0, 1024]`. 
fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (10_240_000 as Weight) - // Standard Error: 4_000 - .saturating_add((1_899_000 as Weight).saturating_mul(q as Weight)) + (0 as Weight) + // Standard Error: 5_000 + .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - (18_012_000 as Weight) + (19_016_000 as Weight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - (206_036_000 as Weight) + (205_194_000 as Weight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -209,50 +214,51 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. + /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (243_162_000 as Weight) + (288_487_000 as Weight) // Standard Error: 0 - .saturating_add((122_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `s` is `[0, 1048576]`. 
fn instantiate(s: u32, ) -> Weight { - (180_607_000 as Weight) + (186_136_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (146_032_000 as Weight) + (149_232_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - (45_113_000 as Weight) + (51_721_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -260,14 +266,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - (25_722_000 as Weight) + (30_016_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - (23_135_000 as Weight) + (27_192_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -275,11 +281,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - (205_061_000 as Weight) - // Standard Error: 76_000 - .saturating_add((40_732_000 as Weight).saturating_mul(r as Weight)) + (206_405_000 as Weight) + // Standard Error: 112_000 + .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -287,11 +293,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_is_contract(r: u32, ) -> Weight { - (97_971_000 as Weight) - // Standard Error: 741_000 - .saturating_add((308_361_000 as Weight).saturating_mul(r as Weight)) + (106_220_000 as Weight) + // Standard Error: 710_000 + .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -300,11 +306,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - (109_052_000 as Weight) - // Standard Error: 716_000 - .saturating_add((366_257_000 as Weight).saturating_mul(r as Weight)) + (104_498_000 as Weight) + // Standard Error: 633_000 + .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -313,11 +319,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - (205_748_000 as Weight) - // Standard Error: 87_000 - .saturating_add((44_474_000 as Weight).saturating_mul(r as Weight)) + (208_696_000 as Weight) + // Standard Error: 101_000 + .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -325,11 +331,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - (201_898_000 as Weight) - // Standard Error: 55_000 - .saturating_add((16_703_000 as Weight).saturating_mul(r as Weight)) + (205_612_000 as Weight) + // Standard Error: 68_000 + .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -337,11 +343,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_address(r: u32, ) -> Weight { - (204_668_000 as Weight) - // Standard Error: 65_000 - .saturating_add((40_459_000 as Weight).saturating_mul(r as Weight)) + (206_947_000 as Weight) + // Standard Error: 107_000 + .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -349,11 +355,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - (203_240_000 as Weight) - // Standard Error: 68_000 - .saturating_add((40_270_000 as Weight).saturating_mul(r as Weight)) + (208_692_000 as Weight) + // Standard Error: 109_000 + .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -361,11 +367,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - (211_535_000 as Weight) - // Standard Error: 73_000 - .saturating_add((114_954_000 as Weight).saturating_mul(r as Weight)) + (209_811_000 as Weight) + // Standard Error: 208_000 + .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -373,11 +379,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - (204_653_000 as Weight) - // Standard Error: 71_000 - .saturating_add((40_188_000 as Weight).saturating_mul(r as Weight)) + (207_406_000 as Weight) + // Standard Error: 117_000 + .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -385,11 +391,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_minimum_balance(r: u32, ) -> Weight { - (204_690_000 as Weight) - // Standard Error: 82_000 - .saturating_add((40_260_000 as Weight).saturating_mul(r as Weight)) + (209_260_000 as Weight) + // Standard Error: 130_000 + .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -397,11 +403,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - (205_004_000 as Weight) - // Standard Error: 62_000 - .saturating_add((40_018_000 as Weight).saturating_mul(r as Weight)) + (206_448_000 as Weight) + // Standard Error: 95_000 + .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -409,11 +415,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - (204_341_000 as Weight) - // Standard Error: 93_000 - .saturating_add((39_920_000 as Weight).saturating_mul(r as Weight)) + (206_969_000 as Weight) + // Standard Error: 116_000 + .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -421,12 +427,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - (208_702_000 as Weight) - // Standard Error: 115_000 - .saturating_add((101_441_000 as Weight).saturating_mul(r as Weight)) + (211_611_000 as Weight) + // Standard Error: 175_000 + .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -434,11 +440,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_gas(r: u32, ) -> Weight { - (131_983_000 as Weight) - // Standard Error: 17_000 - .saturating_add((19_153_000 as Weight).saturating_mul(r as Weight)) + (134_484_000 as Weight) + // Standard Error: 57_000 + .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -446,11 +452,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - (203_768_000 as Weight) - // Standard Error: 57_000 - .saturating_add((39_316_000 as Weight).saturating_mul(r as Weight)) + (208_556_000 as Weight) + // Standard Error: 125_000 + .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -458,11 +464,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - (273_930_000 as Weight) - // Standard Error: 3_000 - .saturating_add((9_513_000 as Weight).saturating_mul(n as Weight)) + (268_886_000 as Weight) + // Standard Error: 4_000 + .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -470,10 +476,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - fn seal_return(r: u32, ) -> Weight { - (199_311_000 as Weight) - // Standard Error: 601_000 - .saturating_add((2_181_000 as Weight).saturating_mul(r as Weight)) + /// The range of component `r` is `[0, 1]`. + fn seal_return(_r: u32, ) -> Weight { + (203_591_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -481,11 +486,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - (201_130_000 as Weight) + (204_258_000 as Weight) // Standard Error: 0 - .saturating_add((184_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -493,28 +498,28 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - (202_063_000 as Weight) - // Standard Error: 100_000 - .saturating_add((54_190_000 as Weight).saturating_mul(r as Weight)) + (206_625_000 as Weight) + // Standard Error: 672_000 + .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - (206_528_000 as Weight) - // Standard Error: 120_000 - .saturating_add((136_384_000 as Weight).saturating_mul(r as Weight)) + (208_866_000 as Weight) + // Standard Error: 164_000 + .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -522,11 +527,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - (210_309_000 as Weight) - // Standard Error: 138_000 - .saturating_add((236_583_000 as Weight).saturating_mul(r as Weight)) + (220_860_000 as Weight) + // Standard Error: 209_000 + .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -535,12 +540,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:80 w:80) + /// The range of component `t` is `[0, 4]`. + /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (434_046_000 as Weight) - // Standard Error: 1_678_000 - .saturating_add((242_928_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 330_000 - .saturating_add((66_716_000 as Weight).saturating_mul(n as Weight)) + (439_782_000 as Weight) + // Standard Error: 1_643_000 + .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 323_000 + .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -550,119 +557,140 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_debug_message(r: u32, ) -> Weight { - (138_934_000 as Weight) - // Standard Error: 34_000 - .saturating_add((31_927_000 as Weight).saturating_mul(r as Weight)) + (140_280_000 as Weight) + // Standard Error: 82_000 + .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - (88_315_000 as Weight) - // Standard Error: 594_000 - .saturating_add((328_984_000 as Weight).saturating_mul(r as Weight)) + (161_247_000 as Weight) + // Standard Error: 883_000 + .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - (529_349_000 as Weight) - // Standard Error: 223_000 - .saturating_add((21_065_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(85 as Weight)) - .saturating_add(T::DbWeight::get().writes(83 as Weight)) + (529_247_000 as Weight) + // Standard Error: 2_745_000 + .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - (546_447_000 as Weight) - // Standard Error: 261_000 - .saturating_add((8_709_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(85 as Weight)) - .saturating_add(T::DbWeight::get().writes(83 as Weight)) + (529_812_000 as Weight) + // Standard Error: 2_513_000 + .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - (118_849_000 as Weight) - // Standard Error: 518_000 - .saturating_add((309_800_000 as Weight).saturating_mul(r as Weight)) + (184_803_000 as Weight) + // Standard Error: 733_000 + .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - (537_039_000 as Weight) - // Standard Error: 235_000 - .saturating_add((8_071_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(85 as Weight)) - .saturating_add(T::DbWeight::get().writes(83 as Weight)) + (500_958_000 as Weight) + // Standard Error: 2_980_000 + .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(52 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - (125_427_000 as Weight) - // Standard Error: 635_000 - .saturating_add((276_126_000 as Weight).saturating_mul(r as Weight)) + (177_682_000 as Weight) + // Standard Error: 743_000 + .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (500_356_000 as Weight) - // Standard Error: 279_000 - .saturating_add((49_746_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(84 as Weight)) + (465_285_000 as Weight) + // Standard Error: 2_599_000 + .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - (129_046_000 as Weight) - // Standard Error: 408_000 - .saturating_add((237_117_000 as Weight).saturating_mul(r as Weight)) + (179_118_000 as Weight) + // Standard Error: 572_000 + .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - (451_122_000 as Weight) - // Standard Error: 200_000 - .saturating_add((7_750_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(84 as Weight)) + (423_056_000 as Weight) + // Standard Error: 2_037_000 + .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(54 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. 
fn seal_take_storage(r: u32, ) -> Weight { - (118_085_000 as Weight) - // Standard Error: 526_000 - .saturating_add((338_332_000 as Weight).saturating_mul(r as Weight)) + (188_884_000 as Weight) + // Standard Error: 761_000 + .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - (569_270_000 as Weight) - // Standard Error: 294_000 - .saturating_add((51_071_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(85 as Weight)) - .saturating_add(T::DbWeight::get().writes(83 as Weight)) + (532_408_000 as Weight) + // Standard Error: 3_348_000 + .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - (124_818_000 as Weight) - // Standard Error: 1_251_000 - .saturating_add((1_455_607_000 as Weight).saturating_mul(r as Weight)) + (127_181_000 as Weight) + // Standard Error: 1_495_000 + .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -672,11 +700,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_575_000 - .saturating_add((14_645_061_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 3_803_000 + .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -686,11 +714,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_delegate_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 5_742_000 - .saturating_add((14_623_917_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 6_045_000 + .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -698,13 +726,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `t` is `[0, 1]`. + /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - (9_081_635_000 as Weight) - // Standard Error: 11_326_000 - .saturating_add((1_335_139_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 4_000 - .saturating_add((9_575_000 as Weight).saturating_mul(c as Weight)) + (9_196_444_000 as Weight) + // Standard Error: 20_486_000 + .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(85 as Weight)) .saturating_add(T::DbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(81 as Weight)) @@ -714,13 +743,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) + /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 34_958_000 - .saturating_add((20_700_850_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 36_253_000 + .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -730,15 +759,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `t` is `[0, 1]`. + /// The range of component `s` is `[0, 960]`. 
fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - (12_091_206_000 as Weight) - // Standard Error: 104_884_000 - .saturating_add((635_259_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 49_000 - .saturating_add((122_935_000 as Weight).saturating_mul(s as Weight)) + (12_282_498_000 as Weight) + // Standard Error: 48_112_000 + .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 22_000 + .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(167 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(165 as Weight)) @@ -748,11 +778,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - (203_315_000 as Weight) - // Standard Error: 74_000 - .saturating_add((60_223_000 as Weight).saturating_mul(r as Weight)) + (203_959_000 as Weight) + // Standard Error: 142_000 + .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -760,11 +790,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (355_672_000 as Weight) - // Standard Error: 25_000 - .saturating_add((319_519_000 as Weight).saturating_mul(n as Weight)) + (349_915_000 as Weight) + // Standard Error: 40_000 + .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -772,11 +802,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - (203_117_000 as Weight) - // Standard Error: 94_000 - .saturating_add((77_363_000 as Weight).saturating_mul(r as Weight)) + (209_219_000 as Weight) + // Standard Error: 157_000 + .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -784,11 +814,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (196_575_000 as Weight) - // Standard Error: 13_000 - .saturating_add((243_479_000 as Weight).saturating_mul(n as Weight)) + (208_860_000 as Weight) + // Standard Error: 25_000 + .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -796,11 +826,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - (203_938_000 as Weight) - // Standard Error: 97_000 - .saturating_add((50_708_000 as Weight).saturating_mul(r as Weight)) + (206_165_000 as Weight) + // Standard Error: 138_000 + .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -808,11 +838,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (247_065_000 as Weight) - // Standard Error: 8_000 - .saturating_add((94_160_000 as Weight).saturating_mul(n as Weight)) + (255_955_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -820,11 +850,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - (204_389_000 as Weight) - // Standard Error: 86_000 - .saturating_add((50_663_000 as Weight).saturating_mul(r as Weight)) + (208_153_000 as Weight) + // Standard Error: 140_000 + .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -832,11 +862,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (284_700_000 as Weight) - // Standard Error: 9_000 - .saturating_add((94_231_000 as Weight).saturating_mul(n as Weight)) + (278_368_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -844,11 +874,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - (235_813_000 as Weight) - // Standard Error: 521_000 - .saturating_add((3_044_204_000 as Weight).saturating_mul(r as Weight)) + (331_955_000 as Weight) + // Standard Error: 1_155_000 + .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -856,11 +886,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - (204_095_000 as Weight) - // Standard Error: 495_000 - .saturating_add((2_027_914_000 as Weight).saturating_mul(r as Weight)) + (207_838_000 as Weight) + // Standard Error: 783_000 + .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -869,267 +899,319 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts OwnerInfoOf (r:16 w:16) + /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_604_000 - .saturating_add((759_511_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_567_000 + .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) } + /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - (74_210_000 as Weight) + (73_955_000 as Weight) // Standard Error: 1_000 - .saturating_add((601_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - (74_123_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_315_000 as Weight).saturating_mul(r as Weight)) + (74_057_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - (74_145_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_388_000 as Weight).saturating_mul(r as Weight)) + (74_137_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_select(r: u32, ) -> Weight { - (73_931_000 as Weight) + (73_844_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_768_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - (73_829_000 as Weight) - // Standard Error: 0 - .saturating_add((1_957_000 as Weight).saturating_mul(r as Weight)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - (73_760_000 as Weight) - // Standard Error: 0 - .saturating_add((932_000 as Weight).saturating_mul(r as Weight)) + (73_924_000 as Weight) + // Standard Error: 3_000 + .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - (73_714_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_420_000 as Weight).saturating_mul(r as Weight)) + (73_574_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - (73_496_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_575_000 as Weight).saturating_mul(r as Weight)) + (73_343_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - (76_036_000 as Weight) + (76_267_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(e as Weight)) + .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - (76_015_000 as Weight) - // Standard Error: 19_000 - .saturating_add((6_954_000 as Weight).saturating_mul(r as Weight)) + (74_877_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - (88_247_000 as Weight) - // Standard Error: 9_000 - .saturating_add((8_957_000 as Weight).saturating_mul(r as Weight)) + (88_665_000 as Weight) + // Standard Error: 20_000 + .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (98_336_000 as Weight) + (98_600_000 as Weight) // Standard Error: 2_000 - .saturating_add((474_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - (74_565_000 as Weight) - // Standard Error: 4_000 - .saturating_add((627_000 as Weight).saturating_mul(r as Weight)) + (74_555_000 as Weight) + // Standard Error: 1_000 + .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - (74_414_000 as Weight) + (74_329_000 as Weight) // Standard Error: 1_000 - .saturating_add((684_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_local_tee(r: u32, ) -> Weight { - (74_346_000 as Weight) + (74_612_000 as Weight) // Standard Error: 1_000 - .saturating_add((911_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - (76_649_000 as Weight) - // Standard Error: 0 - .saturating_add((1_183_000 as Weight).saturating_mul(r as Weight)) + (76_906_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - (76_995_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_370_000 as Weight).saturating_mul(r as Weight)) + (76_979_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - (73_927_000 as Weight) - // Standard Error: 1_000 - .saturating_add((666_000 as Weight).saturating_mul(r as Weight)) + (74_370_000 as Weight) + // Standard Error: 3_000 + .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - (73_479_000 as Weight) - // Standard Error: 24_000 - .saturating_add((180_808_000 as Weight).saturating_mul(r as Weight)) + (73_584_000 as Weight) + // Standard Error: 353_000 + .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - (74_048_000 as Weight) + (74_206_000 as Weight) // Standard Error: 1_000 - .saturating_add((885_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - (73_894_000 as Weight) + (73_992_000 as Weight) // Standard Error: 1_000 - .saturating_add((889_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - (73_728_000 as Weight) - // Standard Error: 0 - .saturating_add((896_000 as Weight).saturating_mul(r as Weight)) + (73_985_000 as Weight) + // Standard Error: 2_000 + .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - (74_049_000 as Weight) - // Standard Error: 1_000 - .saturating_add((897_000 as Weight).saturating_mul(r as Weight)) + (74_117_000 as Weight) + // Standard Error: 4_000 + .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - (74_118_000 as Weight) + (73_981_000 as Weight) // Standard Error: 1_000 - .saturating_add((863_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - (74_042_000 as Weight) - // Standard Error: 1_000 - .saturating_add((865_000 as Weight).saturating_mul(r as Weight)) + (74_104_000 as Weight) + // Standard Error: 3_000 + .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - (73_885_000 as Weight) - // Standard Error: 1_000 - .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) + (74_293_000 as Weight) + // Standard Error: 3_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - (73_788_000 as Weight) - // Standard Error: 0 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + (74_055_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - (73_727_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_710_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - (73_825_000 as Weight) - // Standard Error: 0 - .saturating_add((1_351_000 as Weight).saturating_mul(r as Weight)) + (73_917_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - (73_638_000 as Weight) - // Standard Error: 0 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + (74_048_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - (73_688_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_357_000 as Weight).saturating_mul(r as Weight)) + (74_029_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - (73_895_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) + (74_267_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - (73_860_000 as Weight) - // Standard Error: 0 + (73_952_000 as Weight) + // Standard Error: 1_000 .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - (73_864_000 as Weight) - // Standard Error: 0 - .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) + (73_851_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - (72_730_000 as Weight) - // Standard Error: 7_000 - .saturating_add((1_432_000 as Weight).saturating_mul(r as Weight)) + (74_034_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - (73_998_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_346_000 as Weight).saturating_mul(r as Weight)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_i64add(r: u32, ) -> Weight { - (73_708_000 as Weight) + (74_000_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_333_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - (74_312_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_339_000 as Weight).saturating_mul(r as Weight)) + (73_883_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - (74_203_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_322_000 as Weight).saturating_mul(r as Weight)) + (74_216_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - (73_990_000 as Weight) + (73_989_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_010_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - (73_918_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_019_000 as Weight).saturating_mul(r as Weight)) + (73_857_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - (73_927_000 as Weight) + (73_801_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_001_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - (73_691_000 as Weight) - // Standard Error: 0 - .saturating_add((2_062_000 as Weight).saturating_mul(r as Weight)) + (74_130_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - (73_869_000 as Weight) - // Standard Error: 0 + (74_071_000 as Weight) + // Standard Error: 1_000 .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - (73_890_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + (74_201_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - (73_866_000 as Weight) + (74_241_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - (73_793_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (74_331_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_i64shrs(r: u32, ) -> Weight { - (73_695_000 as Weight) - // Standard Error: 0 - .saturating_add((1_354_000 as Weight).saturating_mul(r as Weight)) + (73_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - (73_743_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_807_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - (73_781_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_725_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - (73_941_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) + (73_755_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } } @@ -1137,44 +1219,47 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - (1_641_000 as Weight) + (1_654_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - (11_878_000 as Weight) + (8_564_000 as Weight) // Standard Error: 0 - .saturating_add((758_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((868_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) + /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (10_240_000 as Weight) - // Standard Error: 4_000 - .saturating_add((1_899_000 as Weight).saturating_mul(q as Weight)) + (0 as Weight) + // Standard Error: 5_000 + .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - (18_012_000 as Weight) + (19_016_000 as Weight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `c` is `[0, 131072]`. 
fn call_with_code_per_byte(c: u32, ) -> Weight { - (206_036_000 as Weight) + (205_194_000 as Weight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -1182,50 +1267,51 @@ impl WeightInfo for () { // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. + /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (243_162_000 as Weight) + (288_487_000 as Weight) // Standard Error: 0 - .saturating_add((122_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - (180_607_000 as Weight) + (186_136_000 as Weight) // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (146_032_000 as Weight) + (149_232_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) + /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - (45_113_000 as Weight) + (51_721_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -1233,14 +1319,14 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - (25_722_000 as Weight) + (30_016_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - (23_135_000 as Weight) + (27_192_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -1248,11 +1334,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - (205_061_000 as Weight) - // Standard Error: 76_000 - .saturating_add((40_732_000 as Weight).saturating_mul(r as Weight)) + (206_405_000 as Weight) + // Standard Error: 112_000 + .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1260,11 +1346,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - (97_971_000 as Weight) - // Standard Error: 741_000 - .saturating_add((308_361_000 as Weight).saturating_mul(r as Weight)) + (106_220_000 as Weight) + // Standard Error: 710_000 + .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1273,11 +1359,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - (109_052_000 as Weight) - // Standard Error: 716_000 - .saturating_add((366_257_000 as Weight).saturating_mul(r as Weight)) + (104_498_000 as Weight) + // Standard Error: 633_000 + .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1286,11 +1372,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - (205_748_000 as Weight) - // Standard Error: 87_000 - .saturating_add((44_474_000 as Weight).saturating_mul(r as Weight)) + (208_696_000 as Weight) + // Standard Error: 101_000 + .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1298,11 +1384,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - (201_898_000 as Weight) - // Standard Error: 55_000 - .saturating_add((16_703_000 as Weight).saturating_mul(r as Weight)) + (205_612_000 as Weight) + // Standard Error: 68_000 + .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1310,11 +1396,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - (204_668_000 as Weight) - // Standard Error: 65_000 - .saturating_add((40_459_000 as Weight).saturating_mul(r as Weight)) + (206_947_000 as Weight) + // Standard Error: 107_000 + .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1322,11 +1408,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - (203_240_000 as Weight) - // Standard Error: 68_000 - .saturating_add((40_270_000 as Weight).saturating_mul(r as Weight)) + (208_692_000 as Weight) + // Standard Error: 109_000 + .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1334,11 +1420,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - (211_535_000 as Weight) - // Standard Error: 73_000 - .saturating_add((114_954_000 as Weight).saturating_mul(r as Weight)) + (209_811_000 as Weight) + // Standard Error: 208_000 + .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1346,11 +1432,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - (204_653_000 as Weight) - // Standard Error: 71_000 - .saturating_add((40_188_000 as Weight).saturating_mul(r as Weight)) + (207_406_000 as Weight) + // Standard Error: 117_000 + .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1358,11 +1444,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - (204_690_000 as Weight) - // Standard Error: 82_000 - .saturating_add((40_260_000 as Weight).saturating_mul(r as Weight)) + (209_260_000 as Weight) + // Standard Error: 130_000 + .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1370,11 +1456,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - (205_004_000 as Weight) - // Standard Error: 62_000 - .saturating_add((40_018_000 as Weight).saturating_mul(r as Weight)) + (206_448_000 as Weight) + // Standard Error: 95_000 + .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1382,11 +1468,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_now(r: u32, ) -> Weight { - (204_341_000 as Weight) - // Standard Error: 93_000 - .saturating_add((39_920_000 as Weight).saturating_mul(r as Weight)) + (206_969_000 as Weight) + // Standard Error: 116_000 + .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1394,12 +1480,12 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - (208_702_000 as Weight) - // Standard Error: 115_000 - .saturating_add((101_441_000 as Weight).saturating_mul(r as Weight)) + (211_611_000 as Weight) + // Standard Error: 175_000 + .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1407,11 +1493,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - (131_983_000 as Weight) - // Standard Error: 17_000 - .saturating_add((19_153_000 as Weight).saturating_mul(r as Weight)) + (134_484_000 as Weight) + // Standard Error: 57_000 + .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1419,11 +1505,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - (203_768_000 as Weight) - // Standard Error: 57_000 - .saturating_add((39_316_000 as Weight).saturating_mul(r as Weight)) + (208_556_000 as Weight) + // Standard Error: 125_000 + .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1431,11 +1517,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. 
fn seal_input_per_kb(n: u32, ) -> Weight { - (273_930_000 as Weight) - // Standard Error: 3_000 - .saturating_add((9_513_000 as Weight).saturating_mul(n as Weight)) + (268_886_000 as Weight) + // Standard Error: 4_000 + .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1443,10 +1529,9 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - fn seal_return(r: u32, ) -> Weight { - (199_311_000 as Weight) - // Standard Error: 601_000 - .saturating_add((2_181_000 as Weight).saturating_mul(r as Weight)) + /// The range of component `r` is `[0, 1]`. + fn seal_return(_r: u32, ) -> Weight { + (203_591_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1454,11 +1539,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - (201_130_000 as Weight) + (204_258_000 as Weight) // Standard Error: 0 - .saturating_add((184_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1466,28 +1551,28 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - (202_063_000 as Weight) - // Standard Error: 100_000 - .saturating_add((54_190_000 as Weight).saturating_mul(r as Weight)) + (206_625_000 as Weight) + // Standard Error: 672_000 + .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + /// The range of component `r` is `[0, 20]`. 
fn seal_random(r: u32, ) -> Weight { - (206_528_000 as Weight) - // Standard Error: 120_000 - .saturating_add((136_384_000 as Weight).saturating_mul(r as Weight)) + (208_866_000 as Weight) + // Standard Error: 164_000 + .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1495,11 +1580,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - (210_309_000 as Weight) - // Standard Error: 138_000 - .saturating_add((236_583_000 as Weight).saturating_mul(r as Weight)) + (220_860_000 as Weight) + // Standard Error: 209_000 + .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1508,12 +1593,14 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:80 w:80) + /// The range of component `t` is `[0, 4]`. + /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (434_046_000 as Weight) - // Standard Error: 1_678_000 - .saturating_add((242_928_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 330_000 - .saturating_add((66_716_000 as Weight).saturating_mul(n as Weight)) + (439_782_000 as Weight) + // Standard Error: 1_643_000 + .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 323_000 + .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1523,119 +1610,140 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - (138_934_000 as Weight) - // Standard Error: 34_000 - .saturating_add((31_927_000 as Weight).saturating_mul(r as Weight)) + (140_280_000 as Weight) + // Standard Error: 82_000 + .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. 
fn seal_set_storage(r: u32, ) -> Weight { - (88_315_000 as Weight) - // Standard Error: 594_000 - .saturating_add((328_984_000 as Weight).saturating_mul(r as Weight)) + (161_247_000 as Weight) + // Standard Error: 883_000 + .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - (529_349_000 as Weight) - // Standard Error: 223_000 - .saturating_add((21_065_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(85 as Weight)) - .saturating_add(RocksDbWeight::get().writes(83 as Weight)) + (529_247_000 as Weight) + // Standard Error: 2_745_000 + .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - (546_447_000 as Weight) - // Standard Error: 261_000 - .saturating_add((8_709_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(85 as Weight)) - .saturating_add(RocksDbWeight::get().writes(83 as Weight)) + (529_812_000 as Weight) + // Standard Error: 2_513_000 + .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - (118_849_000 as Weight) - // Standard Error: 518_000 - .saturating_add((309_800_000 as Weight).saturating_mul(r as Weight)) + (184_803_000 as Weight) + // Standard Error: 733_000 + .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - (537_039_000 as Weight) - // Standard Error: 235_000 - .saturating_add((8_071_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(85 as Weight)) - .saturating_add(RocksDbWeight::get().writes(83 as Weight)) + (500_958_000 as Weight) + // Standard Error: 2_980_000 + .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(52 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - (125_427_000 as Weight) - // Standard Error: 635_000 - .saturating_add((276_126_000 as Weight).saturating_mul(r as Weight)) + (177_682_000 as Weight) + // Standard Error: 743_000 + .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (500_356_000 as Weight) - // Standard Error: 279_000 - .saturating_add((49_746_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(84 as Weight)) + (465_285_000 as Weight) + // Standard Error: 2_599_000 + .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - (129_046_000 as Weight) - // Standard Error: 408_000 - .saturating_add((237_117_000 as Weight).saturating_mul(r as Weight)) + (179_118_000 as Weight) + // Standard Error: 572_000 + .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - (451_122_000 as Weight) - // Standard Error: 200_000 - .saturating_add((7_750_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(84 as Weight)) + (423_056_000 as Weight) + // Standard Error: 2_037_000 + .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(54 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `r` is `[0, 10]`. 
fn seal_take_storage(r: u32, ) -> Weight { - (118_085_000 as Weight) - // Standard Error: 526_000 - .saturating_add((338_332_000 as Weight).saturating_mul(r as Weight)) + (188_884_000 as Weight) + // Standard Error: 761_000 + .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - (569_270_000 as Weight) - // Standard Error: 294_000 - .saturating_add((51_071_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(85 as Weight)) - .saturating_add(RocksDbWeight::get().writes(83 as Weight)) + (532_408_000 as Weight) + // Standard Error: 3_348_000 + .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - (124_818_000 as Weight) - // Standard Error: 1_251_000 - .saturating_add((1_455_607_000 as Weight).saturating_mul(r as Weight)) + (127_181_000 as Weight) + // Standard Error: 1_495_000 + .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -1645,11 +1753,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_575_000 - .saturating_add((14_645_061_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 3_803_000 + .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1659,11 +1767,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. 
fn seal_delegate_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 5_742_000 - .saturating_add((14_623_917_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 6_045_000 + .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1671,13 +1779,14 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `t` is `[0, 1]`. + /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - (9_081_635_000 as Weight) - // Standard Error: 11_326_000 - .saturating_add((1_335_139_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 4_000 - .saturating_add((9_575_000 as Weight).saturating_mul(c as Weight)) + (9_196_444_000 as Weight) + // Standard Error: 20_486_000 + .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(85 as Weight)) .saturating_add(RocksDbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(81 as Weight)) @@ -1687,13 +1796,13 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) + /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 34_958_000 - .saturating_add((20_700_850_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 36_253_000 + .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) @@ -1703,15 +1812,16 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:1) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + /// The range of component `t` is `[0, 1]`. + /// The range of component `s` is `[0, 960]`. 
fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - (12_091_206_000 as Weight) - // Standard Error: 104_884_000 - .saturating_add((635_259_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 49_000 - .saturating_add((122_935_000 as Weight).saturating_mul(s as Weight)) + (12_282_498_000 as Weight) + // Standard Error: 48_112_000 + .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 22_000 + .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(167 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(165 as Weight)) @@ -1721,11 +1831,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - (203_315_000 as Weight) - // Standard Error: 74_000 - .saturating_add((60_223_000 as Weight).saturating_mul(r as Weight)) + (203_959_000 as Weight) + // Standard Error: 142_000 + .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1733,11 +1843,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (355_672_000 as Weight) - // Standard Error: 25_000 - .saturating_add((319_519_000 as Weight).saturating_mul(n as Weight)) + (349_915_000 as Weight) + // Standard Error: 40_000 + .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1745,11 +1855,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - (203_117_000 as Weight) - // Standard Error: 94_000 - .saturating_add((77_363_000 as Weight).saturating_mul(r as Weight)) + (209_219_000 as Weight) + // Standard Error: 157_000 + .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1757,11 +1867,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (196_575_000 as Weight) - // Standard Error: 13_000 - .saturating_add((243_479_000 as Weight).saturating_mul(n as Weight)) + (208_860_000 as Weight) + // Standard Error: 25_000 + .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1769,11 +1879,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - (203_938_000 as Weight) - // Standard Error: 97_000 - .saturating_add((50_708_000 as Weight).saturating_mul(r as Weight)) + (206_165_000 as Weight) + // Standard Error: 138_000 + .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1781,11 +1891,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (247_065_000 as Weight) - // Standard Error: 8_000 - .saturating_add((94_160_000 as Weight).saturating_mul(n as Weight)) + (255_955_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1793,11 +1903,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - (204_389_000 as Weight) - // Standard Error: 86_000 - .saturating_add((50_663_000 as Weight).saturating_mul(r as Weight)) + (208_153_000 as Weight) + // Standard Error: 140_000 + .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1805,11 +1915,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (284_700_000 as Weight) - // Standard Error: 9_000 - .saturating_add((94_231_000 as Weight).saturating_mul(n as Weight)) + (278_368_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1817,11 +1927,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - (235_813_000 as Weight) - // Standard Error: 521_000 - .saturating_add((3_044_204_000 as Weight).saturating_mul(r as Weight)) + (331_955_000 as Weight) + // Standard Error: 1_155_000 + .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1829,11 +1939,11 @@ impl WeightInfo for () { // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: unknown [0x3a7472616e73616374696f6e5f6c6576656c3a] (r:1 w:1) + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - (204_095_000 as Weight) - // Standard Error: 495_000 - .saturating_add((2_027_914_000 as Weight).saturating_mul(r as Weight)) + (207_838_000 as Weight) + // Standard Error: 783_000 + .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1842,266 +1952,318 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts OwnerInfoOf (r:16 w:16) + /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_604_000 - .saturating_add((759_511_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_567_000 + .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) } + /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - (74_210_000 as Weight) + (73_955_000 as Weight) // Standard Error: 1_000 - .saturating_add((601_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - (74_123_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_315_000 as Weight).saturating_mul(r as Weight)) + (74_057_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - (74_145_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_388_000 as Weight).saturating_mul(r as Weight)) + (74_137_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_select(r: u32, ) -> Weight { - (73_931_000 as Weight) + (73_844_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_768_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - (73_829_000 as Weight) - // Standard Error: 0 - .saturating_add((1_957_000 as Weight).saturating_mul(r as Weight)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - (73_760_000 as Weight) - // Standard Error: 0 - .saturating_add((932_000 as Weight).saturating_mul(r as Weight)) + (73_924_000 as Weight) + // Standard Error: 3_000 + .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - (73_714_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_420_000 as Weight).saturating_mul(r as Weight)) + (73_574_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - (73_496_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_575_000 as Weight).saturating_mul(r as Weight)) + (73_343_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - (76_036_000 as Weight) + (76_267_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(e as Weight)) + .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - (76_015_000 as Weight) - // Standard Error: 19_000 - .saturating_add((6_954_000 as Weight).saturating_mul(r as Weight)) + (74_877_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - (88_247_000 as Weight) - // Standard Error: 9_000 - .saturating_add((8_957_000 as Weight).saturating_mul(r as Weight)) + (88_665_000 as Weight) + // Standard Error: 20_000 + .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (98_336_000 as Weight) + (98_600_000 as Weight) // Standard Error: 2_000 - .saturating_add((474_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - (74_565_000 as Weight) - // Standard Error: 4_000 - .saturating_add((627_000 as Weight).saturating_mul(r as Weight)) + (74_555_000 as Weight) + // Standard Error: 1_000 + .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - (74_414_000 as Weight) + (74_329_000 as Weight) // Standard Error: 1_000 - .saturating_add((684_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_local_tee(r: u32, ) -> Weight { - (74_346_000 as Weight) + (74_612_000 as Weight) // Standard Error: 1_000 - .saturating_add((911_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - (76_649_000 as Weight) - // Standard Error: 0 - .saturating_add((1_183_000 as Weight).saturating_mul(r as Weight)) + (76_906_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - (76_995_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_370_000 as Weight).saturating_mul(r as Weight)) + (76_979_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - (73_927_000 as Weight) - // Standard Error: 1_000 - .saturating_add((666_000 as Weight).saturating_mul(r as Weight)) + (74_370_000 as Weight) + // Standard Error: 3_000 + .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - (73_479_000 as Weight) - // Standard Error: 24_000 - .saturating_add((180_808_000 as Weight).saturating_mul(r as Weight)) + (73_584_000 as Weight) + // Standard Error: 353_000 + .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - (74_048_000 as Weight) + (74_206_000 as Weight) // Standard Error: 1_000 - .saturating_add((885_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - (73_894_000 as Weight) + (73_992_000 as Weight) // Standard Error: 1_000 - .saturating_add((889_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - (73_728_000 as Weight) - // Standard Error: 0 - .saturating_add((896_000 as Weight).saturating_mul(r as Weight)) + (73_985_000 as Weight) + // Standard Error: 2_000 + .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - (74_049_000 as Weight) - // Standard Error: 1_000 - .saturating_add((897_000 as Weight).saturating_mul(r as Weight)) + (74_117_000 as Weight) + // Standard Error: 4_000 + .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - (74_118_000 as Weight) + (73_981_000 as Weight) // Standard Error: 1_000 - .saturating_add((863_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - (74_042_000 as Weight) - // Standard Error: 1_000 - .saturating_add((865_000 as Weight).saturating_mul(r as Weight)) + (74_104_000 as Weight) + // Standard Error: 3_000 + .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - (73_885_000 as Weight) - // Standard Error: 1_000 - .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) + (74_293_000 as Weight) + // Standard Error: 3_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - (73_788_000 as Weight) - // Standard Error: 0 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + (74_055_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - (73_727_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_710_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - (73_825_000 as Weight) - // Standard Error: 0 - .saturating_add((1_351_000 as Weight).saturating_mul(r as Weight)) + (73_917_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - (73_638_000 as Weight) - // Standard Error: 0 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + (74_048_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - (73_688_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_357_000 as Weight).saturating_mul(r as Weight)) + (74_029_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - (73_895_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) + (74_267_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - (73_860_000 as Weight) - // Standard Error: 0 + (73_952_000 as Weight) + // Standard Error: 1_000 .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - (73_864_000 as Weight) - // Standard Error: 0 - .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) + (73_851_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - (72_730_000 as Weight) - // Standard Error: 7_000 - .saturating_add((1_432_000 as Weight).saturating_mul(r as Weight)) + (74_034_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - (73_998_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_346_000 as Weight).saturating_mul(r as Weight)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
fn instr_i64add(r: u32, ) -> Weight { - (73_708_000 as Weight) + (74_000_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_333_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - (74_312_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_339_000 as Weight).saturating_mul(r as Weight)) + (73_883_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - (74_203_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_322_000 as Weight).saturating_mul(r as Weight)) + (74_216_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - (73_990_000 as Weight) + (73_989_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_010_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - (73_918_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_019_000 as Weight).saturating_mul(r as Weight)) + (73_857_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - (73_927_000 as Weight) + (73_801_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_001_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - (73_691_000 as Weight) - // Standard Error: 0 - .saturating_add((2_062_000 as Weight).saturating_mul(r as Weight)) + (74_130_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - (73_869_000 as Weight) - // Standard Error: 0 + (74_071_000 as Weight) + // Standard Error: 1_000 .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - (73_890_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + (74_201_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - (73_866_000 as Weight) + (74_241_000 as Weight) // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - (73_793_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (74_331_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. 
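A worked example against the updated constants: `instr_i64add` above now charges a base of 74_000_000 plus 1_328_000 per unit of `r`, so at the top of its `[0, 50]` range the charged weight is 74_000_000 + 1_328_000 * 50 = 140_400_000. The near-identical base of roughly 74_000_000 on every `instr_*` entry is presumably the fixed cost of setting up the benchmark's execution environment, which is why the per-`r` slope, not the base, is what differentiates the individual instructions.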
fn instr_i64shrs(r: u32, ) -> Weight { - (73_695_000 as Weight) - // Standard Error: 0 - .saturating_add((1_354_000 as Weight).saturating_mul(r as Weight)) + (73_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - (73_743_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_807_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - (73_781_000 as Weight) - // Standard Error: 0 - .saturating_add((1_352_000 as Weight).saturating_mul(r as Weight)) + (73_725_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } + /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - (73_941_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) + (73_755_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } } From 16bae92b3f257f9c15525ae3d0a23d036bd59715 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 23 Jun 2022 17:54:15 +0300 Subject: [PATCH 014/135] Explain why `rusty-cachier` is put first (#11740) --- scripts/ci/gitlab/pipeline/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 0c3fd4d33798d..cc167410f94a4 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -80,6 +80,7 @@ cargo-check-benches: - .test-refs - .collect-artifacts before_script: + # perform rusty-cachier operations before any further modifications to the git repo to make cargo feel cheated not so much - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] # merges in the master branch on PRs From 60b945caca440fdf6a7be96b7dc0db54923f039d Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 24 Jun 2022 20:25:21 +0900 Subject: [PATCH 015/135] Bump `wasmtime` to 0.38.0 and `zstd` to 0.11.2 (#11720) * Bump `wasmtime` to 0.37.0 and `zstd` to 0.11.2 * Bump `wasmtime` to 0.38.0 --- Cargo.lock | 180 +++++++++++--------- client/executor/wasmtime/Cargo.toml | 2 +- client/executor/wasmtime/src/imports.rs | 12 +- client/executor/wasmtime/src/runtime.rs | 1 - frame/state-trie-migration/Cargo.toml | 2 +- primitives/maybe-compressed-blob/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/wasm-interface/Cargo.toml | 2 +- utils/frame/try-runtime/cli/Cargo.toml | 2 +- 9 files changed, 109 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cfcf5573c0bff..58604b47a87da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1141,11 +1141,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38faa2a16616c8e78a18d37b4726b98bfd2de192f2fdc8a39ddf568a408a0f75" +checksum = "899dc8d22f7771e7f887fb8bafa0c0d3ac1dea0c7f2c0ded6e20a855a7a1e890" dependencies = [ - "cranelift-entity 0.82.3", + "cranelift-entity 0.85.0", ] [[package]] @@ -1160,24 +1160,25 @@ dependencies = [ "cranelift-entity 0.76.0", "gimli 0.25.0", "log", - "regalloc 0.0.31", + "regalloc", "smallvec", "target-lexicon", ] [[package]] name = 
"cranelift-codegen" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f192472a3ba23860afd07d2b0217dc628f21fcc72617aa1336d98e1671f33b" +checksum = "8dbdc03f695cf67e7bc45da57155528274f47390b85060af8107eb304ef167c4" dependencies = [ - "cranelift-bforest 0.82.3", - "cranelift-codegen-meta 0.82.3", - "cranelift-codegen-shared 0.82.3", - "cranelift-entity 0.82.3", + "cranelift-bforest 0.85.0", + "cranelift-codegen-meta 0.85.0", + "cranelift-codegen-shared 0.85.0", + "cranelift-entity 0.85.0", + "cranelift-isle", "gimli 0.26.1", "log", - "regalloc 0.0.34", + "regalloc2", "smallvec", "target-lexicon", ] @@ -1194,11 +1195,11 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32ddb89e9b89d3d9b36a5b7d7ea3261c98235a76ac95ba46826b8ec40b1a24" +checksum = "9ea66cbba3eb7fcb3ec9f42839a6d381bd40cf97780397e7167daf9725d4ffa0" dependencies = [ - "cranelift-codegen-shared 0.82.3", + "cranelift-codegen-shared 0.85.0", ] [[package]] @@ -1209,9 +1210,9 @@ checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" [[package]] name = "cranelift-codegen-shared" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01fd0d9f288cc1b42d9333b7a776b17e278fc888c28e6a0f09b5573d45a150bc" +checksum = "712fbebd119a476f59122b4ba51fdce893a66309b5c92bd5506bfb11a0587496" [[package]] name = "cranelift-entity" @@ -1221,9 +1222,9 @@ checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" [[package]] name = "cranelift-entity" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3bfe172b83167604601faf9dc60453e0d0a93415b57a9c4d1a7ae6849185cf" +checksum = "4cb8b95859c4e14c9e860db78d596a904fdbe9261990233b62bd526346cb56cb" dependencies = [ "serde", ] @@ -1242,40 +1243,46 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a006e3e32d80ce0e4ba7f1f9ddf66066d052a8c884a110b91d05404d6ce26dce" +checksum = "c7b91b19a7d1221a73f190c0e865c12be77a84f661cac89abfd4ab5820142886" dependencies = [ - "cranelift-codegen 0.82.3", + "cranelift-codegen 0.85.0", "log", "smallvec", "target-lexicon", ] +[[package]] +name = "cranelift-isle" +version = "0.85.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d4f53bc86fb458e59c695c6a95ce8346e6a8377ee7ffc058e3ac08b5f94cb1" + [[package]] name = "cranelift-native" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501241b0cdf903412ec9075385ac9f2b1eb18a89044d1538e97fab603231f70c" +checksum = "592f035d0ed41214dfeeb37abd536233536a27be6b4c2d39f380cd402f0cff4f" dependencies = [ - "cranelift-codegen 0.82.3", + "cranelift-codegen 0.85.0", "libc", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.82.3" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d9e4211bbc3268042a96dd4de5bd979cda22434991d035f5f8eacba987fad2" +checksum = "295add6bf0b527a8bc50d02e31ff878585d2d2db53cb7e8754d6d82b84480086" dependencies = [ - "cranelift-codegen 0.82.3", - "cranelift-entity 0.82.3", - "cranelift-frontend 0.82.3", + "cranelift-codegen 0.85.0", + "cranelift-entity 0.85.0", + 
"cranelift-frontend 0.85.0", "itertools", "log", "smallvec", - "wasmparser 0.83.0", + "wasmparser 0.85.0", "wasmtime-types", ] @@ -2572,6 +2579,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "gcc" version = "0.3.55" @@ -5118,8 +5134,6 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ - "crc32fast", - "indexmap", "memchr", ] @@ -5137,9 +5151,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "oorandom" @@ -7591,13 +7605,14 @@ dependencies = [ ] [[package]] -name = "regalloc" -version = "0.0.34" +name = "regalloc2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02" +checksum = "0d37148700dbb38f994cd99a1431613057f37ed934d7e4d799b7ab758c482461" dependencies = [ + "fxhash", "log", - "rustc-hash", + "slice-group-by", "smallvec", ] @@ -7829,9 +7844,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.33.5" +version = "0.33.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03627528abcc4a365554d32a9f3bbf67f7694c102cfeda792dc86a2d6057cc85" +checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" dependencies = [ "bitflags", "errno", @@ -9539,6 +9554,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +[[package]] +name = "slice-group-by" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" + [[package]] name = "smallvec" version = "1.8.0" @@ -11901,15 +11922,18 @@ checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmparser" -version = "0.83.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" +checksum = "570460c58b21e9150d2df0eaaedbb7816c34bcec009ae0dcc976e40ba81463e7" +dependencies = [ + "indexmap", +] [[package]] name = "wasmtime" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ffb4705016d5ca91e18a72ed6822dab50e6d5ddd7045461b17ef19071cdef1" +checksum = "c842f9c8e190fe01300fc8d715e9368c775670fb9856247c67abffdb5236d6db" dependencies = [ "anyhow", "backtrace", @@ -11919,7 +11943,7 @@ dependencies = [ "lazy_static", "libc", "log", - "object 0.27.1", + "object 0.28.3", "once_cell", "paste 1.0.6", "psm", @@ -11927,7 +11951,7 @@ dependencies = [ "region 2.2.0", "serde", "target-lexicon", - "wasmparser 0.83.0", + "wasmparser 0.85.0", "wasmtime-cache", "wasmtime-cranelift", "wasmtime-environ", @@ -11938,9 +11962,9 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.35.3" +version = "0.38.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c6ab24291fa7cb3a181f5669f6c72599b7ef781669759b45c7828c5999d0c0" +checksum = "cce2aa752e864a33eef2a6629edc59554e75f0bc1719431dac5e49eed516af69" dependencies = [ "anyhow", "base64", @@ -11958,51 +11982,51 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04c810078a491b7bc4866ebe045f714d2b95e6b539e1f64009a4a7606be11de" +checksum = "922361eb8c03cea8909bc922471202f6c6bc2f0c682fac2fe473740441c86b3b" dependencies = [ "anyhow", - "cranelift-codegen 0.82.3", - "cranelift-entity 0.82.3", - "cranelift-frontend 0.82.3", + "cranelift-codegen 0.85.0", + "cranelift-entity 0.85.0", + "cranelift-frontend 0.85.0", "cranelift-native", "cranelift-wasm", "gimli 0.26.1", "log", "more-asserts", - "object 0.27.1", + "object 0.28.3", "target-lexicon", "thiserror", - "wasmparser 0.83.0", + "wasmparser 0.85.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61448266ea164b1ac406363cdcfac81c7c44db4d94c7a81c8620ac6c5c6cdf59" +checksum = "e602f1120fc40a3f016f1f69d08c86cfeff7b867bed1462901953e6871f85167" dependencies = [ "anyhow", - "cranelift-entity 0.82.3", + "cranelift-entity 0.85.0", "gimli 0.26.1", "indexmap", "log", "more-asserts", - "object 0.27.1", + "object 0.28.3", "serde", "target-lexicon", "thiserror", - "wasmparser 0.83.0", + "wasmparser 0.85.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "156b4623c6b0d4b8c24afb846c20525922f538ef464cc024abab7ea8de2109a2" +checksum = "49af1445759a8e797a92f27dd0983c155615648263052e0b80d69e7d223896b7" dependencies = [ "addr2line", "anyhow", @@ -12011,7 +12035,7 @@ dependencies = [ "cpp_demangle", "gimli 0.26.1", "log", - "object 0.27.1", + "object 0.28.3", "region 2.2.0", "rustc-demangle", "rustix", @@ -12026,20 +12050,20 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5dc31f811760a6c76b2672c404866fd19b75e5fb3b0075a3e377a6846490654" +checksum = "e6d5dd480cc6dc0a401653e45b79796a3317f8228990d84bc2271bdaf0810071" dependencies = [ "lazy_static", - "object 0.27.1", + "object 0.28.3", "rustix", ] [[package]] name = "wasmtime-runtime" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f907beaff69d4d920fa4688411ee4cc75c0f01859e424677f9e426e2ef749864" +checksum = "e875bcd02d1ecfc7d099dd58354d55d73467652eb2b103ff470fe3aecb7d0381" dependencies = [ "anyhow", "backtrace", @@ -12063,14 +12087,14 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "0.35.3" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514ef0e5fd197b9609dc9eb74beba0c84d5a12b2417cbae55534633329ba4852" +checksum = "8fd63a19ba61ac7448add4dc1fecb8d78304812af2a52dad04b89f887791b156" dependencies = [ - "cranelift-entity 0.82.3", + "cranelift-entity 0.85.0", "serde", "thiserror", - "wasmparser 0.83.0", + "wasmparser 0.85.0", ] [[package]] @@ -12328,18 +12352,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.10.0+zstd.1.5.2" +version = "0.11.2+zstd.1.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b1365becbe415f3f0fcd024e2f7b45bacfb5bdd055f0dc113571394114e7bdd" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "4.1.4+zstd.1.5.2" +version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7cd17c9af1a4d6c24beb1cc54b17e2ef7b593dc92f19e9d9acad8b182bbaee" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", @@ -12347,9 +12371,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.6.3+zstd.1.5.2" +version = "2.0.1+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc49afa5c8d634e75761feda8c592051e7eeb4683ba827211eb0d731d3402ea8" +checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" dependencies = [ "cc", "libc", diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 83859ee61be34..9d40fb53fd559 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } libc = "0.2.121" log = "0.4.17" parity-wasm = "0.42.0" -wasmtime = { version = "0.35.3", default-features = false, features = [ +wasmtime = { version = "0.38.0", default-features = false, features = [ "cache", "cranelift", "jitdump", diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index 4aad571029313..fae8d75854ef3 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -34,7 +34,7 @@ where { let mut pending_func_imports = HashMap::new(); for import_ty in module.imports() { - let name = import_name(&import_ty)?; + let name = import_ty.name(); if import_ty.module() != "env" { return Err(WasmError::Other(format!( @@ -121,13 +121,3 @@ impl<'a, 'b> sp_wasm_interface::HostFunctionRegistry for Registry<'a, 'b> { Ok(()) } } - -/// When the module linking proposal is supported the import's name can be `None`. -/// Because we are not using this proposal we could safely unwrap the name. -/// However, we opt for an error in order to avoid panics at all costs. 
-fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmError> { - let name = import.name().ok_or_else(|| { - WasmError::Other("The module linking proposal is not supported.".to_owned()) - })?; - Ok(name) -} diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 8f6826653f27d..2184dd7466a53 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -323,7 +323,6 @@ fn common_config(semantics: &Semantics) -> std::result::Result Date: Fri, 24 Jun 2022 18:21:07 -0400 Subject: [PATCH 016/135] Avoid a duplicate block request when syncing from a fork (#11094) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Separate queueing blocks for import from removal * Add regression tests * Remove unnecessary log * Clear queued blocks when processed * Move check out of match block * Track queued block ranges * Update client/network/sync/src/blocks.rs * Update client/network/sync/src/blocks.rs * Update client/network/sync/src/blocks.rs * Update client/network/sync/src/blocks.rs * FMT Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/consensus/common/src/import_queue.rs | 10 + client/network/sync/src/blocks.rs | 114 ++++++++--- client/network/sync/src/lib.rs | 200 ++++++++++++++++++-- 3 files changed, 281 insertions(+), 43 deletions(-) diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index a7b456191b000..c71e21ccd4b00 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -160,6 +160,16 @@ pub enum BlockImportStatus { ImportedUnknown(N, ImportedAux, Option), } +impl BlockImportStatus { + /// Returns the imported block number. + pub fn number(&self) -> &N { + match self { + BlockImportStatus::ImportedKnown(n, _) | + BlockImportStatus::ImportedUnknown(n, _, _) => n, + } + } +} + /// Block import error. #[derive(Debug, thiserror::Error)] pub enum BlockImportError { diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index b897184e7a44c..2cca13bfd7cc7 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -18,7 +18,7 @@ use crate::message; use libp2p::PeerId; -use log::trace; +use log::{debug, trace}; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; use std::{ cmp, @@ -39,6 +39,7 @@ pub struct BlockData { enum BlockRangeState { Downloading { len: NumberFor, downloading: u32 }, Complete(Vec>), + Queued { len: NumberFor }, } impl BlockRangeState { @@ -46,6 +47,7 @@ impl BlockRangeState { match *self { Self::Downloading { len, .. } => len, Self::Complete(ref blocks) => (blocks.len() as u32).into(), + Self::Queued { len } => len, } } } @@ -56,12 +58,19 @@ pub struct BlockCollection { /// Downloaded blocks. blocks: BTreeMap, BlockRangeState>, peer_requests: HashMap>, + /// Block ranges downloaded and queued for import. + /// Maps start_hash => (start_num, end_num). + queued_blocks: HashMap, NumberFor)>, } impl BlockCollection { /// Create a new instance. pub fn new() -> Self { - Self { blocks: BTreeMap::new(), peer_requests: HashMap::new() } + Self { + blocks: BTreeMap::new(), + peer_requests: HashMap::new(), + queued_blocks: HashMap::new(), + } } /// Clear everything. @@ -170,29 +179,52 @@ impl BlockCollection { } /// Get a valid chain of blocks ordered in descending order and ready for importing into - /// blockchain. 
- pub fn drain(&mut self, from: NumberFor) -> Vec> { - let mut drained = Vec::new(); - let mut ranges = Vec::new(); + /// the blockchain. + pub fn ready_blocks(&mut self, from: NumberFor) -> Vec> { + let mut ready = Vec::new(); let mut prev = from; - for (start, range_data) in &mut self.blocks { - match range_data { - BlockRangeState::Complete(blocks) if *start <= prev => { - prev = *start + (blocks.len() as u32).into(); - // Remove all elements from `blocks` and add them to `drained` - drained.append(blocks); - ranges.push(*start); + for (&start, range_data) in &mut self.blocks { + if start > prev { + break + } + let len = match range_data { + BlockRangeState::Complete(blocks) => { + let len = (blocks.len() as u32).into(); + prev = start + len; + // Remove all elements from `blocks` and add them to `ready` + ready.append(blocks); + len }, + BlockRangeState::Queued { .. } => continue, _ => break, - } + }; + *range_data = BlockRangeState::Queued { len }; } - for r in ranges { - self.blocks.remove(&r); + if let Some(BlockData { block, .. }) = ready.first() { + self.queued_blocks + .insert(block.hash, (from, from + (ready.len() as u32).into())); + } + + trace!(target: "sync", "{} blocks ready for import", ready.len()); + ready + } + + pub fn clear_queued(&mut self, from_hash: &B::Hash) { + match self.queued_blocks.remove(from_hash) { + None => { + debug!(target: "sync", "Can't clear unknown queued blocks from {:?}", from_hash); + }, + Some((from, to)) => { + let mut block_num = from; + while block_num < to { + self.blocks.remove(&block_num); + block_num += One::one(); + } + trace!(target: "sync", "Cleared blocks from {:?} to {:?}", from, to); + }, } - trace!(target: "sync", "Drained {} blocks from {:?}", drained.len(), from); - drained } pub fn clear_peer_download(&mut self, who: &PeerId) { @@ -268,14 +300,14 @@ mod test { bc.clear_peer_download(&peer1); bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); - assert_eq!(bc.drain(1), vec![]); + assert_eq!(bc.ready_blocks(1), vec![]); assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); assert_eq!( - bc.drain(1), + bc.ready_blocks(1), blocks[1..11] .iter() .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) @@ -285,16 +317,16 @@ mod test { bc.clear_peer_download(&peer0); bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); - let drained = bc.drain(12); + let ready = bc.ready_blocks(12); assert_eq!( - drained[..30], + ready[..30], blocks[11..41] .iter() .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) .collect::>()[..] ); assert_eq!( - drained[30..], + ready[30..], blocks[41..81] .iter() .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) @@ -308,17 +340,17 @@ mod test { bc.clear_peer_download(&peer1); bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); - assert_eq!(bc.drain(80), vec![]); - let drained = bc.drain(81); + assert_eq!(bc.ready_blocks(80), vec![]); + let ready = bc.ready_blocks(81); assert_eq!( - drained[..40], + ready[..40], blocks[81..121] .iter() .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) .collect::>()[..] 
); assert_eq!( - drained[40..], + ready[40..], blocks[121..150] .iter() .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) @@ -344,4 +376,32 @@ mod test { Some(100 + 128..100 + 128 + 128) ); } + + #[test] + fn no_duplicate_requests_on_fork() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer = PeerId::random(); + + let blocks = generate_blocks(10); + + // count = 5, peer_best = 50, common = 39, max_parallel = 0, max_ahead = 200 + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); + + // got a response on the request for `40..45` + bc.clear_peer_download(&peer); + bc.insert(40, blocks[..5].to_vec(), peer.clone()); + + // our "node" started on a fork, with its current best = 47, which is > common + let ready = bc.ready_blocks(48); + assert_eq!( + ready, + blocks[..5] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) + .collect::>() + ); + + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); + } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index b027d77827374..4465e840c5ef5 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1098,7 +1098,7 @@ where { self.blocks.insert(start_block, blocks, *who); } - self.drain_blocks() + self.ready_blocks() }, PeerSyncState::DownloadingGap(_) => { peer.state = PeerSyncState::Available; @@ -1112,7 +1112,7 @@ where gap = true; let blocks: Vec<_> = gap_sync .blocks - .drain(gap_sync.best_queued_number + One::one()) + .ready_blocks(gap_sync.best_queued_number + One::one()) .into_iter() .map(|block_data| { let justifications = @@ -1434,6 +1434,12 @@ where OnBlockData::Import(origin, new_blocks) } + fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.update_common_number(new_common); + } + } + /// Handle a response from the remote to a justification request that we made. /// /// `request` must be the original request that triggered `response`. @@ -1515,9 +1521,12 @@ where for (_, hash) in &results { self.queue_blocks.remove(hash); } + if let Some(from_hash) = results.first().map(|(_, h)| h) { + self.blocks.clear_queued(from_hash); + } for (result, hash) in results { if has_error { - continue + break } if result.is_err() { @@ -1525,11 +1534,10 @@ where } match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => { - if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { - peer.update_common_number(number); - } - }, + Ok(BlockImportStatus::ImportedKnown(number, who)) => + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + }, Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( @@ -1558,8 +1566,8 @@ where } } - if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { - peer.update_common_number(number); + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); } let state_sync_complete = self.state_sync.as_ref().map_or(false, |s| s.target() == hash); @@ -1993,11 +2001,11 @@ where // is either one further ahead or it's the one they just announced, if we know about it. 
if is_best { if known && self.best_queued_number >= number { - peer.update_common_number(number); + self.update_peer_common_number(&who, number); } else if announce.header.parent_hash() == &self.best_queued_hash || known_parent && self.best_queued_number >= number { - peer.update_common_number(number - One::one()); + self.update_peer_common_number(&who, number - One::one()); } } self.allowed_requests.add(&who); @@ -2071,7 +2079,7 @@ where target.peers.remove(who); !target.peers.is_empty() }); - let blocks = self.drain_blocks(); + let blocks = self.ready_blocks(); if !blocks.is_empty() { Some(self.validate_and_queue_blocks(blocks, false)) } else { @@ -2191,10 +2199,10 @@ where } } - /// Drain the downloaded block set up to the first gap. - fn drain_blocks(&mut self) -> Vec> { + /// Get the set of downloaded blocks that are ready to be queued for import. + fn ready_blocks(&mut self) -> Vec> { self.blocks - .drain(self.best_queued_number + One::one()) + .ready_blocks(self.best_queued_number + One::one()) .into_iter() .map(|block_data| { let justifications = block_data @@ -2981,6 +2989,25 @@ mod test { best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + )), + b.hash(), + ) + }) + .collect(), + ); + resp_blocks .into_iter() .rev() @@ -3165,6 +3192,147 @@ mod test { ); } + #[test] + fn syncs_fork_without_duplicate_requests() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 5, + None, + ) + .unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + .unwrap(); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + request = match on_block_data.into_request() { + Some(req) => req.1, + // We found the 
ancestor + None => break, + }; + + log::trace!(target: "sync", "Request: {:?}", request); + } + + // Now request and import the fork. + let mut best_block_num = finalized_block.header().number().clone() as u32; + let mut request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1; + while best_block_num < last_block_num { + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + if best_block_num < last_block_num { + // make sure we're not getting a duplicate request in the time before the blocks are + // processed + request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + } + + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + )), + b.hash(), + ) + }) + .collect(), + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request( + &mut sync, + FromBlock::Hash(fork_blocks.last().unwrap().hash()), + 1, + &peer_id1, + ); + } + #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); From c28702f3f2af48b3f04374d41d3f91add33e4e45 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 27 Jun 2022 16:33:58 +0200 Subject: [PATCH 017/135] Democracy.fast_track not allowed with zero voting period (#11666) * Democracy.fast_track not allowed with zero voting period * revert static parameter alter line * ensure voting period greater zero * update doc for fast_track * unit test: instant fast track to the next block referendum is backed * fix typos in comments --- frame/democracy/src/lib.rs | 9 ++-- frame/democracy/src/tests/fast_tracking.rs | 58 ++++++++++++++++++++++ 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 4e11c207657f0..443b8579116d0 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -600,6 +600,8 @@ pub mod pallet { MaxVotesReached, /// Maximum number of proposals reached. TooManyProposals, + /// Voting period too low. + VotingPeriodLow, } #[pallet::hooks] @@ -800,8 +802,9 @@ pub mod pallet { /// The dispatch of this call must be `FastTrackOrigin`. /// /// - `proposal_hash`: The hash of the current external proposal. - /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to - /// `FastTrackVotingPeriod` if too low. + /// - `voting_period`: The period that is allowed for voting on this proposal. + /// Must always be greater than zero. + /// For `FastTrackOrigin` it must be equal to or greater than `FastTrackVotingPeriod`.
/// - `delay`: The number of block after voting has ended in approval and this should be /// enacted. This doesn't have a minimum amount. /// @@ -830,7 +833,7 @@ pub mod pallet { T::InstantOrigin::ensure_origin(ensure_instant)?; ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } - + ensure!(voting_period > T::BlockNumber::zero(), Error::::VotingPeriodLow); let (e_proposal_hash, threshold) = >::get().ok_or(Error::::ProposalMissing)?; ensure!( diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 7a15bb8de4dac..caf83c6d46120 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -67,6 +67,10 @@ fn instant_referendum_works() { Error::::InstantNotAllowed ); INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); + assert_noop!( + Democracy::fast_track(Origin::signed(6), h, 0, 0), + Error::::VotingPeriodLow + ); assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); assert_eq!( Democracy::referendum_status(0), @@ -81,6 +85,60 @@ fn instant_referendum_works() { }); } +#[test] +fn instant_next_block_referendum_backed() { + new_test_ext().execute_with(|| { + // arrange + let start_block_number = 10; + let majority_origin_id = 3; + let instant_origin_id = 6; + let voting_period = 1; + let proposal_hash = set_balance_proposal_hash_and_note(2); + let delay = 2; // has no effect on test + + // init + System::set_block_number(start_block_number); + InstantAllowed::set(true); + + // propose with majority origin + assert_ok!(Democracy::external_propose_majority( + Origin::signed(majority_origin_id), + proposal_hash + )); + + // fast track with instant origin and voting period pointing to the next block + assert_ok!(Democracy::fast_track( + Origin::signed(instant_origin_id), + proposal_hash, + voting_period, + delay + )); + + // fetch the status of the only referendum at index 0 + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: start_block_number + voting_period, + proposal_hash, + threshold: VoteThreshold::SimpleMajority, + delay, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + + // referendum expected to be baked with the start of the next block + next_block(); + + // assert no active referendums + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); + // the only referendum in the storage is finished and not approved + assert_eq!( + ReferendumInfoOf::::get(0).unwrap(), + ReferendumInfo::Finished { approved: false, end: start_block_number + voting_period } + ); + }); +} + #[test] fn fast_track_referendum_fails_when_no_simple_majority() { new_test_ext().execute_with(|| { From ee3eb8f2448cc1bb978c5d1564febd351c128bb0 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 28 Jun 2022 15:54:56 +0900 Subject: [PATCH 018/135] Prevent unsoundness in environments with broken `madvise(MADV_DONTNEED)` (#11722) * Prevend unsoundness in environments with broken `madvise(MADV_DONTNEED)` * Add the `std` feature to `rustix` dependency Apparently not having this breaks compilation on non-nightly toolchains. 
* Autodetect the page size when checking whether `madvise` works * Only make sure that the madvice check doesn't return `Err` --- Cargo.lock | 89 ++++++++++++++++++++--- client/executor/wasmtime/Cargo.toml | 4 ++ client/executor/wasmtime/src/runtime.rs | 15 ++-- client/executor/wasmtime/src/util.rs | 93 ++++++++++++++++++++++++- 4 files changed, 187 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58604b47a87da..1bf1034005284 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3113,6 +3113,12 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" +[[package]] +name = "io-lifetimes" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" + [[package]] name = "ip_network" version = "0.4.1" @@ -3411,9 +3417,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.121" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libgit2-sys" @@ -4108,6 +4114,12 @@ version = "0.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5284f00d480e1c39af34e72f8ad60b94f47007e3481cd3b731c1d67190ddc7b7" +[[package]] +name = "linux-raw-sys" +version = "0.0.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" + [[package]] name = "lite-json" version = "0.1.3" @@ -6712,7 +6724,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.32.0", ] [[package]] @@ -7850,12 +7862,26 @@ checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" dependencies = [ "bitflags", "errno", - "io-lifetimes", + "io-lifetimes 0.5.3", "libc", - "linux-raw-sys", + "linux-raw-sys 0.0.42", "winapi", ] +[[package]] +name = "rustix" +version = "0.35.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef258c11e17f5c01979a10543a30a4e12faef6aab217a74266e747eefa3aed88" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes 0.7.2", + "libc", + "linux-raw-sys 0.0.46", + "windows-sys 0.36.1", +] + [[package]] name = "rustls" version = "0.20.2" @@ -8479,9 +8505,11 @@ dependencies = [ "cfg-if 1.0.0", "libc", "log", + "once_cell", "parity-scale-codec", "parity-wasm 0.42.2", "paste 1.0.6", + "rustix 0.35.6", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -11972,7 +12000,7 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.33.7", "serde", "sha2 0.9.8", "toml", @@ -12038,7 +12066,7 @@ dependencies = [ "object 0.28.3", "region 2.2.0", "rustc-demangle", - "rustix", + "rustix 0.33.7", "serde", "target-lexicon", "thiserror", @@ -12056,7 +12084,7 @@ checksum = "e6d5dd480cc6dc0a401653e45b79796a3317f8228990d84bc2271bdaf0810071" dependencies = [ "lazy_static", "object 0.28.3", - "rustix", + "rustix 0.33.7", ] [[package]] @@ -12078,7 +12106,7 @@ dependencies = [ "more-asserts", "rand 0.8.4", "region 2.2.0", - "rustix", + "rustix 0.33.7", "thiserror", "wasmtime-environ", "wasmtime-jit-debug", @@ -12226,6 +12254,19 @@ dependencies = [ "windows_x86_64_msvc 0.32.0", ] +[[package]] +name = 
"windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + [[package]] name = "windows_aarch64_msvc" version = "0.29.0" @@ -12238,6 +12279,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + [[package]] name = "windows_i686_gnu" version = "0.29.0" @@ -12250,6 +12297,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + [[package]] name = "windows_i686_msvc" version = "0.29.0" @@ -12262,6 +12315,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + [[package]] name = "windows_x86_64_gnu" version = "0.29.0" @@ -12274,6 +12333,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + [[package]] name = "windows_x86_64_msvc" version = "0.29.0" @@ -12286,6 +12351,12 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.7.0" diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 9d40fb53fd559..2dcfde378bb87 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -32,6 +32,10 @@ sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime- sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } +[target.'cfg(target_os = "linux")'.dependencies] +rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param"] } +once_cell = "1.12.0" + [dev-dependencies] wat = "1.0" sc-runtime-test = { version = "2.0.0", path = "../runtime-test" } diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 
2184dd7466a53..c63a802df07cc 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -21,7 +21,7 @@ use crate::{ host::HostState, instance_wrapper::{EntryPoint, InstanceWrapper}, - util, + util::{self, replace_strategy_if_broken}, }; use sc_allocator::FreeingBumpHeapAllocator; @@ -411,6 +411,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result( code_supply_mode: CodeSupplyMode<'_>, - config: Config, + mut config: Config, ) -> std::result::Result where H: HostFunctions, { + replace_strategy_if_broken(&mut config.semantics.instantiation_strategy); + let mut wasmtime_config = common_config(&config.semantics)?; if let Some(ref cache_path) = config.cache_path { if let Err(reason) = setup_wasmtime_caching(cache_path, &mut wasmtime_config) { @@ -719,9 +723,12 @@ pub fn prepare_runtime_artifact( blob: RuntimeBlob, semantics: &Semantics, ) -> std::result::Result, WasmError> { - let blob = prepare_blob_for_compilation(blob, semantics)?; + let mut semantics = semantics.clone(); + replace_strategy_if_broken(&mut semantics.instantiation_strategy); + + let blob = prepare_blob_for_compilation(blob, &semantics)?; - let engine = Engine::new(&common_config(semantics)?) + let engine = Engine::new(&common_config(&semantics)?) .map_err(|e| WasmError::Other(format!("cannot create the engine: {}", e)))?; engine diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 60ca598aee181..83745e21e86af 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::runtime::StoreData; +use crate::{runtime::StoreData, InstantiationStrategy}; use sc_executor_common::{ error::{Error, Result}, util::checked_range, @@ -98,3 +98,94 @@ pub(crate) fn write_memory_from( memory[range].copy_from_slice(data); Ok(()) } + +/// Checks whether the `madvise(MADV_DONTNEED)` works as expected. +/// +/// In certain environments (e.g. when running under the QEMU user-mode emulator) +/// this syscall is broken. +#[cfg(target_os = "linux")] +fn is_madvise_working() -> std::result::Result { + let page_size = rustix::param::page_size(); + + unsafe { + // Allocate two memory pages. + let pointer = rustix::mm::mmap_anonymous( + std::ptr::null_mut(), + 2 * page_size, + rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE, + rustix::mm::MapFlags::PRIVATE, + ) + .map_err(|error| format!("mmap failed: {}", error))?; + + // Dirty them both. + std::ptr::write_volatile(pointer.cast::(), b'A'); + std::ptr::write_volatile(pointer.cast::().add(page_size), b'B'); + + // Clear the first page. + let result_madvise = + rustix::mm::madvise(pointer, page_size, rustix::mm::Advice::LinuxDontNeed) + .map_err(|error| format!("madvise failed: {}", error)); + + // Fetch the values. + let value_1 = std::ptr::read_volatile(pointer.cast::()); + let value_2 = std::ptr::read_volatile(pointer.cast::().add(page_size)); + + let result_munmap = rustix::mm::munmap(pointer, 2 * page_size) + .map_err(|error| format!("munmap failed: {}", error)); + + result_madvise?; + result_munmap?; + + // Verify that the first page was cleared, while the second one was not. 
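+ // With a working implementation, `MADV_DONTNEED` on a private anonymous
+ // mapping drops the page, so the next read is served from a fresh
+ // zero-filled page; under a broken emulation (e.g. the QEMU user-mode
+ // emulator) the old contents survive and `value_1` would still be `b'A'`.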
+ Ok(value_1 == 0 && value_2 == b'B') + } +} + +#[cfg(test)] +#[cfg(target_os = "linux")] +#[test] +fn test_is_madvise_working_check_does_not_fail() { + assert!(is_madvise_working().is_ok()); +} + +/// Checks whether a given instantiation strategy can be safely used, and replaces +/// it with a slower (but sound) alternative if it isn't. +#[cfg(target_os = "linux")] +pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { + let replacement_strategy = match *strategy { + // These strategies don't need working `madvise`. + InstantiationStrategy::Pooling | InstantiationStrategy::RecreateInstance => return, + + // These strategies require a working `madvise` to be sound. + InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, + InstantiationStrategy::RecreateInstanceCopyOnWrite | + InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + }; + + use once_cell::sync::OnceCell; + static IS_OK: OnceCell = OnceCell::new(); + + let is_ok = IS_OK.get_or_init(|| { + let is_ok = match is_madvise_working() { + Ok(is_ok) => is_ok, + Err(error) => { + // This should never happen. + log::warn!("Failed to check whether `madvise(MADV_DONTNEED)` works: {}", error); + false + } + }; + + if !is_ok { + log::warn!("You're running on a system with a broken `madvise(MADV_DONTNEED)` implementation. This will result in lower performance."); + } + + is_ok + }); + + if !is_ok { + *strategy = replacement_strategy; + } +} + +#[cfg(not(target_os = "linux"))] +pub(crate) fn replace_strategy_if_broken(_: &mut InstantiationStrategy) {} From f39769740858bf480ef13adbfd46267afce92eb9 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 29 Jun 2022 17:39:56 +0100 Subject: [PATCH 019/135] Refund weight in `system::fillBlock` (#11754) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix * pushed * node: fix fee multiplier test Co-authored-by: André Silva --- Cargo.lock | 1 + bin/node/executor/Cargo.toml | 1 + bin/node/executor/tests/fees.rs | 6 ++++-- frame/system/src/lib.rs | 13 +++++++++++-- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1bf1034005284..d68330cfb15c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4732,6 +4732,7 @@ dependencies = [ "pallet-balances", "pallet-contracts", "pallet-im-online", + "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 5fbf59a74fdcd..6143ea3bd8c42 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -34,6 +34,7 @@ node-testing = { version = "3.0.0-dev", path = "../testing" } pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } pallet-contracts = { version = "4.0.0-dev", path = "../../../frame/contracts" } pallet-im-online = { version = "4.0.0-dev", path = "../../../frame/im-online" } +pallet-sudo = { version = "4.0.0-dev", path = "../../../frame/sudo" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } pallet-treasury = { version = "4.0.0-dev", path = "../../../frame/treasury" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index cf794bae1d307..e1550071d3006 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -58,8 +58,10 @@ 
fn fee_multiplier_increases_and_decreases_on_big_weight() { }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block { - ratio: Perbill::from_percent(60), + function: Call::Sudo(pallet_sudo::Call::sudo { + call: Box::new(Call::System(frame_system::Call::fill_block { + ratio: Perbill::from_percent(60), + })), }), }, ], diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 370a802665918..3c6f514808b5d 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -197,6 +197,7 @@ impl, MaxOverflow: Get> ConsumerLimits for (MaxNormal, pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; use frame_support::pallet_prelude::*; + use sp_runtime::DispatchErrorWithPostInfo; /// System configuration trait. Implemented by runtime. #[pallet::config] @@ -371,8 +372,16 @@ pub mod pallet { // that's not possible at present (since it's within the pallet macro). #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - Ok(().into()) + match ensure_root(origin) { + Ok(_) => Ok(().into()), + Err(_) => { + // roughly same as a 4 byte remark since perbill is u32. + Err(DispatchErrorWithPostInfo { + post_info: Some(T::SystemWeightInfo::remark(4u32)).into(), + error: DispatchError::BadOrigin, + }) + }, + } } /// Make some on-chain remark. From 279593d87f103fa5e10c9751a97b1584f3ad79d6 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Thu, 30 Jun 2022 12:02:57 +0200 Subject: [PATCH 020/135] nomination-pools fix (#11748) * Nomination pool fix * fmt --- frame/nomination-pools/src/lib.rs | 8 ++++---- frame/nomination-pools/src/mock.rs | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 19155a51405b0..d43d43c806d61 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -43,7 +43,7 @@ //! //! ### Join //! -//! A account can stake funds with a nomination pool by calling [`Call::join`]. +//! An account can stake funds with a nomination pool by calling [`Call::join`]. //! //! ### Claim rewards //! @@ -87,7 +87,7 @@ //! [`Call::unbond`] and [`Call::withdraw_unbonded`]. Once a pool is in destroying state, it //! cannot be reverted to another state. //! -//! A pool has 3 administrative roles (see [`PoolRoles`]): +//! A pool has 4 administrative roles (see [`PoolRoles`]): //! //! * Depositor: creates the pool and is the initial member. They can only leave the pool once all //! other members have left. Once they fully leave the pool is destroyed. @@ -297,8 +297,8 @@ //! * PoolMembers cannot vote with their staked funds because they are transferred into the pools //! account. In the future this can be overcome by allowing the members to vote with their bonded //! funds via vote splitting. -//! * PoolMembers cannot quickly transfer to another pool if they do no like nominations, instead -//! they must wait for the unbonding duration. +//! * PoolMembers cannot quickly transfer to another pool if they do not like the nominations, +//! instead they must wait for the unbonding duration. //! //! # Runtime builder warnings //! 
diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index a3020e5add044..7d71efc3be04d 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -111,6 +111,7 @@ impl sp_staking::StakingInterface for StakingMock { Ok(()) } + #[cfg(feature = "runtime-benchmarks")] fn nominations(_: Self::AccountId) -> Option> { Nominations::get() } From 3080d3264c97cbe19843cfe708a7ac226309b7fa Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 5 Jul 2022 09:48:37 +0200 Subject: [PATCH 021/135] [ci] Remove polkadot-companion-labels GHA (#11774) --- .../workflows/polkadot-companion-labels.yml | 32 ------------------- 1 file changed, 32 deletions(-) delete mode 100644 .github/workflows/polkadot-companion-labels.yml diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml deleted file mode 100644 index 0a5af09358524..0000000000000 --- a/.github/workflows/polkadot-companion-labels.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Check Polkadot Companion and Label - -on: - pull_request: - types: [opened, synchronize] - -jobs: - check_status: - runs-on: ubuntu-latest - steps: - - name: Monitor the status of the gitlab-check-companion-build job - uses: s3krit/await-status-action@v1.0.1 - id: 'check-companion-status' - with: - authToken: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.sha }} - contexts: 'continuous-integration/gitlab-check-dependent-polkadot' - timeout: 1800 - notPresentTimeout: 3600 # It can take quite a while before the job starts on Gitlab when the CI queue is large - failureStates: failure - interruptedStates: error # Error = job was probably cancelled. We don't want to label the PR in that case - pollInterval: 30 - - name: Label success - uses: andymckay/labeler@master - if: steps.check-companion-status.outputs.result == 'success' - with: - remove-labels: 'A7-needspolkadotpr' - - name: Label failure - uses: andymckay/labeler@master - if: steps.check-companion-status.outputs.result == 'failure' - with: - add-labels: 'A7-needspolkadotpr' From 6a946fc36d68b89599d7ca1ab03803d10c78468c Mon Sep 17 00:00:00 2001 From: GreenBaneling | Supercolony Date: Tue, 5 Jul 2022 11:12:16 +0100 Subject: [PATCH 022/135] [contracts] Fixed the bug with transfer value for delegate call (#11771) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixed the bug with transfer value. * Apply suggestions from code review Co-authored-by: Alexander Theißen * Moved check into `initial_transfer` * Fmt Co-authored-by: Alexander Theißen --- frame/contracts/src/exec.rs | 86 ++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 2 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index ec7e1e9335182..d93c646872c6f 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -797,7 +797,7 @@ where )?; } - // Every call or instantiate also optionally transferres balance. + // Every non delegate call or instantiate also optionally transfers the balance. self.initial_transfer()?; // Call into the wasm blob. @@ -959,8 +959,14 @@ where // The transfer as performed by a call or instantiate. fn initial_transfer(&self) -> DispatchResult { let frame = self.top_frame(); - let value = frame.value_transferred; + // If it is a delegate call, then we've already transferred tokens in the + // last non-delegate frame. 
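+ // A delegate call only borrows the code of another contract and runs it
+ // within the context of the calling frame, so its `value_transferred` is
+ // inherited from that frame; moving it again here would perform the same
+ // transfer twice.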
+ if frame.delegate_caller.is_some() { + return Ok(()) + } + + let value = frame.value_transferred; Self::transfer(ExistenceRequirement::KeepAlive, self.caller(), &frame.account_id, value) } @@ -1560,6 +1566,82 @@ mod tests { }); } + #[test] + fn correct_transfer_on_call() { + let origin = ALICE; + let dest = BOB; + let value = 55; + + let success_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&dest, success_ch); + set_balance(&origin, 100); + let balance = get_balance(&dest); + let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), 55).unwrap(); + + let _ = MockStack::run_call( + origin.clone(), + dest.clone(), + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + value, + vec![], + None, + ) + .unwrap(); + + assert_eq!(get_balance(&origin), 100 - value); + assert_eq!(get_balance(&dest), balance + value); + }); + } + + #[test] + fn correct_transfer_on_delegate_call() { + let origin = ALICE; + let dest = BOB; + let value = 35; + + let success_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + let delegate_ch = MockLoader::insert(Call, move |ctx, _| { + assert_eq!(ctx.ext.value_transferred(), value); + let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&dest, delegate_ch); + set_balance(&origin, 100); + let balance = get_balance(&dest); + let mut storage_meter = storage::meter::Meter::new(&origin, Some(0), 55).unwrap(); + + let _ = MockStack::run_call( + origin.clone(), + dest.clone(), + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + value, + vec![], + None, + ) + .unwrap(); + + assert_eq!(get_balance(&origin), 100 - value); + assert_eq!(get_balance(&dest), balance + value); + }); + } + #[test] fn changes_are_reverted_on_failing_call() { // This test verifies that changes are reverted on a call which fails (or equally, returns From 09154359f98e4ca5c121ad33685840b4dfd1f9de Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 7 Jul 2022 16:24:06 +0200 Subject: [PATCH 023/135] Fix clearing queued blocks in the sync module (#11763) --- client/network/sync/src/blocks.rs | 21 ++++++--------- client/network/sync/src/lib.rs | 43 +++++++++++++++++++------------ 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index 2cca13bfd7cc7..8d9b039b14fba 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -18,7 +18,7 @@ use crate::message; use libp2p::PeerId; -use log::{debug, trace}; +use log::trace; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; use std::{ cmp, @@ -212,18 +212,13 @@ impl BlockCollection { } pub fn clear_queued(&mut self, from_hash: &B::Hash) { - match self.queued_blocks.remove(from_hash) { - None => { - debug!(target: "sync", "Can't clear unknown queued blocks from {:?}", from_hash); - }, - Some((from, to)) => { - let mut block_num = from; - while block_num < to { - self.blocks.remove(&block_num); - block_num += One::one(); - } - 
trace!(target: "sync", "Cleared blocks from {:?} to {:?}", from, to); - }, + if let Some((from, to)) = self.queued_blocks.remove(from_hash) { + let mut block_num = from; + while block_num < to { + self.blocks.remove(&block_num); + block_num += One::one(); + } + trace!(target: "sync", "Cleared blocks from {:?} to {:?}", from, to); } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 4465e840c5ef5..5b5216c745c98 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1520,9 +1520,7 @@ where let mut has_error = false; for (_, hash) in &results { self.queue_blocks.remove(hash); - } - if let Some(from_hash) = results.first().map(|(_, h)| h) { - self.blocks.clear_queued(from_hash); + self.blocks.clear_queued(hash); } for (result, hash) in results { if has_error { @@ -3299,23 +3297,34 @@ mod test { ); } + let mut notify_imported: Vec<_> = resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + )), + b.hash(), + ) + }) + .collect(); + + // The import queue may send notifications in batches of varying size. So we simulate + // this here by splitting the batch into 2 notifications. + let second_batch = notify_imported.split_off(notify_imported.len() / 2); let _ = sync.on_blocks_processed( MAX_BLOCKS_TO_REQUEST as usize, MAX_BLOCKS_TO_REQUEST as usize, - resp_blocks - .iter() - .rev() - .map(|b| { - ( - Ok(BlockImportStatus::ImportedUnknown( - b.header().number().clone(), - Default::default(), - Some(peer_id1.clone()), - )), - b.hash(), - ) - }) - .collect(), + notify_imported, + ); + + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + second_batch, ); resp_blocks From 6c5ac3168d7ce4810648d667969c82b1db9950ac Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Thu, 7 Jul 2022 18:02:37 +0300 Subject: [PATCH 024/135] pallet-mmr: handle forks without collisions in offchain storage (#11594) * pallet-mmr: fix some typos * pallet-mmr: make the MMR resilient to chain forks * pallet-mmr: get hash for block that added node * beefy-mmr: add debug logging * add explanatory comment * account for block offset of pallet activation * add support for finding all nodes added by leaf * minor improvements * add helper to return all nodes added to mmr with a leaf append * simplify leaf_node_index_to_leaf_index summing the (shifted) differences in peak positions adds up to the (shifted) final position, so don't need to fold over positions. * dead fish: this also doesn't work The idea was to keep a rolling window of `(parent_hash, pos)` leaf entries in the offchain db, with the window matching the one that provides `block_num -> block_hash` mappings in `frame_system`. Once a leaf exits the window it would be "canonicalized" by switching its offchain db key from `(parent_hash, pos)` to simple `pos`. This doesn't work however because there's no way to get leaf contents from offchain db while in runtime context.. so no way to get+clear+set leaf to change its key in offchain db. Ideas: 1. move the "canonicalization" logic to offchain worker 2. enhance IndexingApi with "offchain::move(old_key, new_key)" This is weird, but correct, deterministic and safe AFAICT, so it could be exposed to runtime. 
* simplify rightmost_leaf_node_index_from_pos * minor fix * move leaf canonicalization to offchain worker * move storage related code to storage.rs * on offchain reads use canonic key for old leaves * fix offchain worker write using canon key * fix pallet-mmr tests * add documentation and fix logging * add offchain mmr canonicalization test * test canon + generate + verify * fix pallet-beefy-mmr tests * implement review suggestions * improve test * pallet-mmr: add offchain pruning of forks * pallet-mmr: improve offchain pruning Instead of keeping pruning map as single blob in offchain db, keep individual parent-hash lists with block-num identifier as part of the offchain key. Signed-off-by: acatangiu * pallet-mmr: improve MMRStore::get() Do the math and retrieve node using correct (canon or non-canon) offchain db key, instead of blindly looking in both canon and non-canon offchain db locations for each node. Still fallback on looking at both if for any reason it's not where expected. Signed-off-by: acatangiu * pallet-mmr: storage: improve logs * fix tests: correctly persist overlay runtime indexing API works on overlay, whereas offchain context bypasses overlay, so for loops > canon-window, canon would fail. * pallet-mmr: fix numeric typo in test * add comment around LeafData requirements Signed-off-by: acatangiu Co-authored-by: Robert Hambrock --- frame/beefy-mmr/src/tests.rs | 22 +- frame/merkle-mountain-range/src/lib.rs | 71 ++++- .../merkle-mountain-range/src/mmr/storage.rs | 228 +++++++++++++- frame/merkle-mountain-range/src/mmr/utils.rs | 68 +++++ frame/merkle-mountain-range/src/tests.rs | 287 ++++++++++++++++-- primitives/merkle-mountain-range/src/lib.rs | 12 +- 6 files changed, 635 insertions(+), 53 deletions(-) diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index d9cd8c8a5d8c8..eaa50004ae848 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -44,16 +44,12 @@ pub fn beefy_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) } -fn offchain_key(pos: usize) -> Vec { - (::INDEXING_PREFIX, pos as u64).encode() -} - -fn read_mmr_leaf(ext: &mut TestExternalities, index: usize) -> MmrLeaf { +fn read_mmr_leaf(ext: &mut TestExternalities, key: Vec) -> MmrLeaf { type Node = pallet_mmr::primitives::DataOrHash; ext.persist_offchain_overlay(); let offchain_db = ext.offchain_db(); offchain_db - .get(&offchain_key(index)) + .get(&key) .map(|d| Node::decode(&mut &*d).unwrap()) .map(|n| match n { Node::Data(d) => d, @@ -105,12 +101,17 @@ fn should_contain_mmr_digest() { #[test] fn should_contain_valid_leaf_data() { + fn node_offchain_key(parent_hash: H256, pos: usize) -> Vec { + (::INDEXING_PREFIX, parent_hash, pos as u64).encode() + } + let mut ext = new_test_ext(vec![1, 2, 3, 4]); - ext.execute_with(|| { + let parent_hash = ext.execute_with(|| { init_block(1); + >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, 0); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 0)); assert_eq!( mmr_leaf, MmrLeaf { @@ -128,11 +129,12 @@ fn should_contain_valid_leaf_data() { ); // build second block on top - ext.execute_with(|| { + let parent_hash = ext.execute_with(|| { init_block(2); + >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, 1); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 1)); assert_eq!( mmr_leaf, MmrLeaf { diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index d6cf3240692fc..9274e8e72c508 
100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -58,7 +58,10 @@ use codec::Encode; use frame_support::weights::Weight; -use sp_runtime::traits::{self, One, Saturating}; +use sp_runtime::{ + traits::{self, One, Saturating}, + SaturatedConversion, +}; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -116,12 +119,12 @@ pub mod pallet { /// Prefix for elements stored in the Off-chain DB via Indexing API. /// /// Each node of the MMR is inserted both on-chain and off-chain via Indexing API. - /// The former does not store full leaf content, just it's compact version (hash), + /// The former does not store full leaf content, just its compact version (hash), /// and some of the inner mmr nodes might be pruned from on-chain storage. /// The latter will contain all the entries in their full form. /// /// Each node is stored in the Off-chain DB under key derived from the - /// [`Self::INDEXING_PREFIX`] and it's in-tree index (MMR position). + /// [`Self::INDEXING_PREFIX`] and its in-tree index (MMR position). const INDEXING_PREFIX: &'static [u8]; /// A hasher type for MMR. @@ -162,6 +165,12 @@ pub mod pallet { /// /// Note that the leaf at each block MUST be unique. You may want to include a block hash or /// block number as an easiest way to ensure that. + /// Also note that the leaf added by each block is expected to only reference data coming + /// from ancestor blocks (leaves are saved offchain using `(parent_hash, pos)` key to be + /// fork-resistant, as such conflicts could only happen on 1-block deep forks, which means + /// two forks with identical line of ancestors compete to write the same offchain key, but + /// that's fine as long as leaves only contain data coming from ancestors - conflicting + /// writes are identical). type LeafData: primitives::LeafDataProvider; /// A hook to act on the new MMR root. @@ -215,8 +224,31 @@ pub mod pallet { >::put(root); let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + T::WeightInfo::on_initialize(peaks_before.max(peaks_after)) } + + fn offchain_worker(n: T::BlockNumber) { + use mmr::storage::{OffchainStorage, Storage}; + // MMR pallet uses offchain storage to hold full MMR and leaves. + // The leaves are saved under fork-unique keys `(parent_hash, pos)`. + // MMR Runtime depends on `frame_system::block_hash(block_num)` mappings to find + // parent hashes for particular nodes or leaves. + // This MMR offchain worker function moves a rolling window of the same size + // as `frame_system::block_hash` map, where nodes/leaves added by blocks that are just + // about to exit the window are "canonicalized" so that their offchain key no longer + // depends on `parent_hash` therefore on access to `frame_system::block_hash`. + // + // This approach works to eliminate fork-induced leaf collisions in offchain db, + // under the assumption that no fork will be deeper than `frame_system::BlockHashCount` + // blocks (2400 blocks on Polkadot, Kusama, Rococo, etc): + // entries pertaining to block `N` where `N < current-2400` are moved to a key based + // solely on block number. The only way to have collisions is if two competing forks + // are deeper than 2400 blocks and they both "canonicalize" their view of block `N`. + // Once a block is canonicalized, all MMR entries pertaining to sibling blocks from + // other forks are pruned from offchain db. 
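+ // E.g. with `BlockHashCount` = 2400: when this worker runs for block `N`,
+ // the nodes appended around block `N - 2400` are about to lose their
+ // `block_hash` mapping, so they are re-keyed to the canonical form and
+ // the copies written by any sibling forks are cleared.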
+ Storage::>::canonicalize_and_prune(n); + } } } @@ -254,9 +286,38 @@ where } impl, I: 'static> Pallet { - fn offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { + /// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR. + /// + /// This combination makes the offchain (key,value) entry resilient to chain forks. + fn node_offchain_key( + parent_hash: ::Hash, + pos: NodeIndex, + ) -> sp_std::prelude::Vec { + (T::INDEXING_PREFIX, parent_hash, pos).encode() + } + + /// Build canonical offchain key for node `pos` in MMR. + /// + /// Used for nodes added by now finalized blocks. + fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { (T::INDEXING_PREFIX, pos).encode() } + + /// Provide the parent number for the block that added `leaf_index` to the MMR. + fn leaf_index_to_parent_block_num( + leaf_index: LeafIndex, + leaves_count: LeafIndex, + ) -> ::BlockNumber { + // leaves are zero-indexed and were added one per block since pallet activation, + // while block numbers are one-indexed, so block number that added `leaf_idx` is: + // `block_num = block_num_when_pallet_activated + leaf_idx + 1` + // `block_num = (current_block_num - leaves_count) + leaf_idx + 1` + // `parent_block_num = current_block_num - leaves_count + leaf_idx`. + >::block_number() + .saturating_sub(leaves_count.saturated_into()) + .saturating_add(leaf_index.saturated_into()) + } + /// Generate a MMR proof for the given `leaf_indices`. /// /// Note this method can only be used from an off-chain context @@ -264,7 +325,7 @@ impl, I: 'static> Pallet { /// all the leaves to be present. /// It may return an error or panic if used incorrectly. pub fn generate_batch_proof( - leaf_indices: Vec, + leaf_indices: Vec, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 535057ca80da7..d31262d4d7f2f 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -18,8 +18,11 @@ //! A MMR storage implementations. use codec::Encode; +use frame_support::traits::Get; use mmr_lib::helper; -use sp_io::offchain_index; +use sp_core::offchain::StorageKind; +use sp_io::{offchain, offchain_index}; +use sp_runtime::traits::UniqueSaturatedInto; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] use sp_std::prelude::*; @@ -46,6 +49,51 @@ pub struct RuntimeStorage; /// DOES NOT support adding new items to the MMR. pub struct OffchainStorage; +/// Suffix of key for the 'pruning_map'. +/// +/// Nodes and leaves are initially saved under fork-specific keys in offchain db, +/// eventually they are "canonicalized" and this map is used to prune non-canon entries. +const OFFCHAIN_PRUNING_MAP_KEY_SUFFIX: &str = "pruning_map"; + +/// Used to store offchain mappings of `BlockNumber -> Vec[Hash]` to track all forks. +/// Size of this offchain map is at most `frame_system::BlockHashCount`, its entries are pruned +/// as part of the mechanism that prunes the forks this map tracks. +pub(crate) struct PruningMap(sp_std::marker::PhantomData<(T, I)>); +impl PruningMap +where + T: Config, + I: 'static, +{ + pub(crate) fn pruning_map_offchain_key(block: T::BlockNumber) -> sp_std::prelude::Vec { + (T::INDEXING_PREFIX, block, OFFCHAIN_PRUNING_MAP_KEY_SUFFIX).encode() + } + + /// Append `hash` to the list of parent hashes for `block` in offchain db. 
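+ /// All forks building on the same block number push their parent hash to
+ /// the same entry, so the entry ends up listing every fork observed at
+ /// that height.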
+ pub fn append(block: T::BlockNumber, hash: ::Hash) { + let map_key = Self::pruning_map_offchain_key(block); + offchain::local_storage_get(StorageKind::PERSISTENT, &map_key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + .or_else(|| Some(Vec::<::Hash>::new())) + .map(|mut parents| { + parents.push(hash); + offchain::local_storage_set( + StorageKind::PERSISTENT, + &map_key, + &Encode::encode(&parents), + ); + }); + } + + /// Remove list of parent hashes for `block` from offchain db and return it. + pub fn remove(block: T::BlockNumber) -> Option::Hash>> { + let map_key = Self::pruning_map_offchain_key(block); + offchain::local_storage_get(StorageKind::PERSISTENT, &map_key).and_then(|v| { + offchain::local_storage_clear(StorageKind::PERSISTENT, &map_key); + codec::Decode::decode(&mut &*v).ok() + }) + } +} + /// A storage layer for MMR. /// /// There are two different implementations depending on the use case. @@ -58,6 +106,109 @@ impl Default for Storage { } } +impl Storage +where + T: Config, + I: 'static, + L: primitives::FullLeaf, +{ + /// Move nodes and leaves added by block `N` in offchain db from _fork-aware key_ to + /// _canonical key_, + /// where `N` is `frame_system::BlockHashCount` blocks behind current block number. + /// + /// This "canonicalization" process is required because the _fork-aware key_ value depends + /// on `frame_system::block_hash(block_num)` map which only holds the last + /// `frame_system::BlockHashCount` blocks. + /// + /// For the canonicalized block, prune all nodes pertaining to other forks from offchain db. + /// + /// Should only be called from offchain context, because it requires both read and write + /// access to offchain db. + pub(crate) fn canonicalize_and_prune(block: T::BlockNumber) { + // Add "block_num -> hash" mapping to offchain db, + // with all forks pushing hashes to same entry (same block number). + let parent_hash = >::parent_hash(); + PruningMap::::append(block, parent_hash); + + // Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves + // are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`. + let leaves = NumberOfLeaves::::get(); + let window_size = + ::BlockHashCount::get().unique_saturated_into(); + if leaves >= window_size { + // Move the rolling window towards the end of `block_num->hash` mappings available + // in the runtime: we "canonicalize" the leaf at the end, + let to_canon_leaf = leaves.saturating_sub(window_size); + // and all the nodes added by that leaf. + let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}", + to_canon_leaf, to_canon_nodes + ); + // For this block number there may be node entries saved from multiple forks. + let to_canon_block_num = + Pallet::::leaf_index_to_parent_block_num(to_canon_leaf, leaves); + // Only entries under this hash (retrieved from state on current canon fork) are to be + // persisted. All other entries added by same block number will be cleared. + let to_canon_hash = >::block_hash(to_canon_block_num); + + Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash); + // Get all the forks to prune, also remove them from the offchain pruning_map. 
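+ // Note the list also contains the canonical fork's own parent hash; its
+ // nodes were copied to canonical keys above, so every fork-aware key for
+ // this block can now be cleared.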
+ PruningMap::::remove(to_canon_block_num) + .map(|forks| { + Self::prune_nodes_for_forks(&to_canon_nodes, forks); + }) + .unwrap_or_else(|| { + frame_support::log::error!( + target: "runtime::mmr::offchain", + "Offchain: could not prune: no entry in pruning map for block {:?}", + to_canon_block_num + ); + }) + } + } + + fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { + for hash in forks { + for pos in nodes { + let key = Pallet::::node_offchain_key(hash, *pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", + "Clear elem at pos {} with key {:?}", + pos, key + ); + offchain::local_storage_clear(StorageKind::PERSISTENT, &key); + } + } + } + + fn canonicalize_nodes_for_hash( + to_canon_nodes: &[NodeIndex], + to_canon_hash: ::Hash, + ) { + for pos in to_canon_nodes { + let key = Pallet::::node_offchain_key(to_canon_hash, *pos); + // Retrieve the element from Off-chain DB under fork-aware key. + if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + let canon_key = Pallet::::node_canon_offchain_key(*pos); + // Add under new canon key. + offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); + frame_support::log::debug!( + target: "runtime::mmr::offchain", + "Moved elem at pos {} from key {:?} to canon key {:?}", + pos, key, canon_key + ); + } else { + frame_support::log::error!( + target: "runtime::mmr::offchain", + "Could not canonicalize elem at pos {} using key {:?}", + pos, key + ); + } + } + } +} + impl mmr_lib::MMRStore> for Storage where T: Config, @@ -65,9 +216,49 @@ where L: primitives::FullLeaf + codec::Decode, { fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { - let key = Pallet::::offchain_key(pos); + let leaves = NumberOfLeaves::::get(); + // Find out which leaf added node `pos` in the MMR. + let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); + + let window_size = + ::BlockHashCount::get().unique_saturated_into(); + // Leaves older than this window should have been canonicalized. + if leaves.saturating_sub(ancestor_leaf_idx) > window_size { + let key = Pallet::::node_canon_offchain_key(pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}", + pos, ancestor_leaf_idx, key + ); + // Just for safety, to easily handle runtime upgrades where any of the window params + // change and maybe we mess up storage migration, + // return _if and only if_ node is found (in normal conditions it's always found), + if let Some(elem) = + sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + { + return Ok(codec::Decode::decode(&mut &*elem).ok()) + } + // BUT if we DID MESS UP, fall through to searching node using fork-specific key. + } + + // Leaves still within the window will be found in offchain db under fork-aware keys. + let ancestor_parent_block_num = + Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); + let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); + let key = Pallet::::node_offchain_key(ancestor_parent_hash, pos); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", + pos, ancestor_leaf_idx, ancestor_parent_hash, key + ); // Retrieve the element from Off-chain DB. Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + .or_else(|| { + // Again, this is just us being extra paranoid. 
+ // We get here only if we mess up a storage migration for a runtime upgrades where + // say the window is increased, and for a little while following the upgrade there's + // leaves inside new 'window' that had been already canonicalized before upgrade. + let key = Pallet::::node_canon_offchain_key(pos); + sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + }) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } @@ -91,9 +282,11 @@ where return Ok(()) } - sp_std::if_std! { - frame_support::log::trace!("elems: {:?}", elems.iter().map(|elem| elem.hash()).collect::>()); - } + frame_support::log::trace!( + target: "runtime::mmr", + "elems: {:?}", + elems.iter().map(|elem| elem.hash()).collect::>() + ); let leaves = NumberOfLeaves::::get(); let size = NodesUtils::new(leaves).size(); @@ -112,11 +305,24 @@ where let mut leaf_index = leaves; let mut node_index = size; + // Use parent hash of block adding new nodes (this block) as extra identifier + // in offchain DB to avoid DB collisions and overwrites in case of forks. + let parent_hash = >::parent_hash(); for elem in elems { + // For now we store this leaf offchain keyed by `(parent_hash, node_index)` + // to make it fork-resistant. + // Offchain worker task will "canonicalize" it `frame_system::BlockHashCount` blocks + // later when we are not worried about forks anymore (highly unlikely to have a fork + // in the chain that deep). + // "Canonicalization" in this case means moving this leaf under a new key based + // only on the leaf's `node_index`. + let key = Pallet::::node_offchain_key(parent_hash, node_index); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", + node_index, parent_hash, key + ); // Indexing API is used to store the full node content (both leaf and inner). - elem.using_encoded(|elem| { - offchain_index::set(&Pallet::::offchain_key(node_index), elem) - }); + elem.using_encoded(|elem| offchain_index::set(&key, elem)); // On-chain we are going to only store new peaks. if peaks_to_store.next_if_eq(&node_index).is_some() { @@ -150,10 +356,8 @@ fn peaks_to_prune_and_store( // both collections may share a common prefix. let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) }; let peaks_after = helper::get_peaks(new_size); - sp_std::if_std! { - frame_support::log::trace!("peaks_before: {:?}", peaks_before); - frame_support::log::trace!("peaks_after: {:?}", peaks_after); - } + frame_support::log::trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before); + frame_support::log::trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after); let mut peaks_before = peaks_before.into_iter().peekable(); let mut peaks_after = peaks_after.into_iter().peekable(); diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs index d9f7e3b671be3..3734ea514782d 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -18,6 +18,7 @@ //! Merkle Mountain Range utilities. use crate::primitives::{LeafIndex, NodeIndex}; +use mmr_lib::helper; /// MMR nodes & size -related utilities. pub struct NodesUtils { @@ -53,11 +54,78 @@ impl NodesUtils { 64 - self.no_of_leaves.next_power_of_two().leading_zeros() } + + /// Calculate `LeafIndex` for the leaf that added `node_index` to the MMR. 
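+ /// E.g. appending leaf 1 adds its own node at position 1 plus the inner
+ /// node at position 2 (the parent of leaves 0 and 1), so this returns
+ /// leaf index 1 for `node_index` 1 or 2.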
+ pub fn leaf_index_that_added_node(node_index: NodeIndex) -> LeafIndex { + let rightmost_leaf_pos = Self::rightmost_leaf_node_index_from_pos(node_index); + Self::leaf_node_index_to_leaf_index(rightmost_leaf_pos) + } + + // Translate a _leaf_ `NodeIndex` to its `LeafIndex`. + fn leaf_node_index_to_leaf_index(pos: NodeIndex) -> LeafIndex { + if pos == 0 { + return 0 + } + let peaks = helper::get_peaks(pos); + (pos + peaks.len() as u64) >> 1 + } + + // Starting from any node position get position of rightmost leaf; this is the leaf + // responsible for the addition of node `pos`. + fn rightmost_leaf_node_index_from_pos(pos: NodeIndex) -> NodeIndex { + pos - (helper::pos_height_in_tree(pos) as u64) + } + + /// Starting from any leaf index, get the sequence of positions of the nodes added + /// to the mmr when this leaf was added (inclusive of the leaf's position itself). + /// That is, all of these nodes are right children of their respective parents. + pub fn right_branch_ending_in_leaf(leaf_index: LeafIndex) -> crate::Vec { + let pos = helper::leaf_index_to_pos(leaf_index); + let num_parents = leaf_index.trailing_ones() as u64; + return (pos..=pos + num_parents).collect() + } } #[cfg(test)] mod tests { use super::*; + use mmr_lib::helper::leaf_index_to_pos; + + #[test] + fn should_calculate_node_index_from_leaf_index() { + for index in 0..100000 { + let pos = leaf_index_to_pos(index); + assert_eq!(NodesUtils::leaf_node_index_to_leaf_index(pos), index); + } + } + + #[test] + fn should_calculate_right_branch_correctly() { + fn left_jump_sequence(leaf_index: LeafIndex) -> Vec { + let pos = leaf_index_to_pos(leaf_index); + let mut right_branch_ending_in_leaf = vec![pos]; + let mut next_pos = pos + 1; + while mmr_lib::helper::pos_height_in_tree(next_pos) > 0 { + right_branch_ending_in_leaf.push(next_pos); + next_pos += 1; + } + right_branch_ending_in_leaf + } + + for leaf_index in 0..100000 { + let pos = mmr_lib::helper::leaf_index_to_pos(leaf_index); + assert_eq!(NodesUtils::right_branch_ending_in_leaf(pos), left_jump_sequence(pos)); + } + } + + #[test] + fn should_calculate_rightmost_leaf_node_index_from_pos() { + for pos in 0..100000 { + let leaf_pos = NodesUtils::rightmost_leaf_node_index_from_pos(pos); + let leaf_index = NodesUtils::leaf_node_index_to_leaf_index(leaf_pos); + assert!(NodesUtils::right_branch_ending_in_leaf(leaf_index).contains(&pos)); + } + } #[test] fn should_calculate_number_of_leaves_correctly() { diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index d025910a9ee5c..53226f8419988 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -15,9 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{mmr::utils, mock::*, *}; +use crate::{ + mmr::{storage::PruningMap, utils}, + mock::*, + *, +}; -use frame_support::traits::OnInitialize; +use frame_support::traits::{Get, OnInitialize}; use mmr_lib::helper; use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, @@ -47,7 +51,6 @@ fn new_block() -> u64 { fn peaks_from_leaves_count(leaves_count: NodeIndex) -> Vec { let size = utils::NodesUtils::new(leaves_count).size(); - helper::get_peaks(size) } @@ -73,7 +76,7 @@ fn decode_node( } } -fn init_chain(blocks: usize) { +fn add_blocks(blocks: usize) { // given for _ in 0..blocks { new_block(); @@ -115,9 +118,10 @@ fn should_start_empty() { fn should_append_to_mmr_when_on_initialize_is_called() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); - ext.execute_with(|| { + let (parent_b1, parent_b2) = ext.execute_with(|| { // when new_block(); + let parent_b1 = >::parent_hash(); // then assert_eq!(crate::NumberOfLeaves::::get(), 1); @@ -136,6 +140,7 @@ fn should_append_to_mmr_when_on_initialize_is_called() { // when new_block(); + let parent_b2 = >::parent_hash(); // then assert_eq!(crate::NumberOfLeaves::::get(), 2); @@ -157,26 +162,33 @@ fn should_append_to_mmr_when_on_initialize_is_called() { hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), ) ); - }); + (parent_b1, parent_b2) + }); // make sure the leaves end up in the offchain DB ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); assert_eq!( - offchain_db.get(&MMR::offchain_key(0)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b1, 0)).map(decode_node), Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),))) ); assert_eq!( - offchain_db.get(&MMR::offchain_key(1)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b2, 1)).map(decode_node), Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),))) ); assert_eq!( - offchain_db.get(&MMR::offchain_key(2)).map(decode_node), + offchain_db.get(&MMR::node_offchain_key(parent_b2, 2)).map(decode_node), Some(mmr::Node::Hash(hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" ))) ); - assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_b2, 3)), None); + + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } #[test] @@ -184,7 +196,7 @@ fn should_construct_larger_mmr_correctly() { let _ = env_logger::try_init(); new_test_ext().execute_with(|| { // when - init_chain(7); + add_blocks(7); // then assert_eq!(crate::NumberOfLeaves::::get(), 7); @@ -215,7 +227,7 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proofs now. This requires the offchain extensions to be present @@ -283,7 +295,7 @@ fn should_generate_batch_proof_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proofs now. 
This requires the offchain extensions to be present @@ -316,7 +328,7 @@ fn should_verify() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -328,7 +340,7 @@ fn should_verify() { }); ext.execute_with(|| { - init_chain(7); + add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); }); @@ -341,7 +353,7 @@ fn should_verify_batch_proof() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -353,7 +365,7 @@ fn should_verify_batch_proof() { }); ext.execute_with(|| { - init_chain(7); + add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); }); @@ -366,7 +378,7 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -393,7 +405,7 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. 
This requires the offchain extensions to be present @@ -424,7 +436,7 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| init_chain(7)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); register_offchain_ext(&mut ext); @@ -438,3 +450,238 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); }); } + +#[test] +fn should_verify_pruning_map() { + use sp_core::offchain::StorageKind; + use sp_io::offchain; + + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + type TestPruningMap = PruningMap; + fn offchain_decoded(key: Vec) -> Option> { + offchain::local_storage_get(StorageKind::PERSISTENT, &key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + } + + // test append + { + TestPruningMap::append(1, H256::repeat_byte(1)); + + TestPruningMap::append(2, H256::repeat_byte(21)); + TestPruningMap::append(2, H256::repeat_byte(22)); + + TestPruningMap::append(3, H256::repeat_byte(31)); + TestPruningMap::append(3, H256::repeat_byte(32)); + TestPruningMap::append(3, H256::repeat_byte(33)); + + // `0` not present + let map_key = TestPruningMap::pruning_map_offchain_key(0); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + + // verify `1` entries + let map_key = TestPruningMap::pruning_map_offchain_key(1); + let expected = vec![H256::repeat_byte(1)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // verify `2` entries + let map_key = TestPruningMap::pruning_map_offchain_key(2); + let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // verify `3` entries + let map_key = TestPruningMap::pruning_map_offchain_key(3); + let expected = + vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; + assert_eq!(offchain_decoded(map_key), Some(expected)); + + // `4` not present + let map_key = TestPruningMap::pruning_map_offchain_key(4); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + } + + // test remove + { + // `0` doesn't return anything + assert_eq!(TestPruningMap::remove(0), None); + + // remove and verify `1` entries + let expected = vec![H256::repeat_byte(1)]; + assert_eq!(TestPruningMap::remove(1), Some(expected)); + + // remove and verify `2` entries + let expected = vec![H256::repeat_byte(21), H256::repeat_byte(22)]; + assert_eq!(TestPruningMap::remove(2), Some(expected)); + + // remove and verify `3` entries + let expected = + vec![H256::repeat_byte(31), H256::repeat_byte(32), H256::repeat_byte(33)]; + assert_eq!(TestPruningMap::remove(3), Some(expected)); + + // `4` doesn't return anything + assert_eq!(TestPruningMap::remove(4), None); + + // no entries left in offchain map + for block in 0..5 { + let map_key = TestPruningMap::pruning_map_offchain_key(block); + assert_eq!(offchain::local_storage_get(StorageKind::PERSISTENT, &map_key), None); + } + } + }) +} + +#[test] +fn should_canonicalize_offchain() { + use frame_support::traits::Hooks; + + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + register_offchain_ext(&mut ext); + + // adding 13 blocks that we'll later check have been canonicalized, + // (test assumes `13 < frame_system::BlockHashCount`). 
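+ // Phase 1 below verifies these nodes live under fork-aware keys; after
+ // another `BlockHashCount` blocks, phase 2 verifies the same nodes are
+ // reachable only through their canonical keys.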
+ let to_canon_count = 13u32; + + // add 13 blocks and verify leaves and nodes for them have been added to + // offchain MMR using fork-proof keys. + for blocknum in 0..to_canon_count { + ext.execute_with(|| { + new_block(); + as Hooks>::offchain_worker(blocknum.into()); + }); + ext.persist_offchain_overlay(); + } + let offchain_db = ext.offchain_db(); + ext.execute_with(|| { + // verify leaves added by blocks 1..=13 + for block_num in 1..=to_canon_count { + let parent_num: BlockNumber = (block_num - 1).into(); + let leaf_index = u64::from(block_num - 1); + let pos = helper::leaf_index_to_pos(leaf_index.into()); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); + let parent_hash = >::block_hash(parent_num); + // but available in fork-proof storage. + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))) + ); + } + + // verify a couple of nodes and peaks: + // `pos` is node to verify, + // `leaf_index` is leaf that added node `pos`, + // `expected` is expected value of node at `pos`. + let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { + let parent_num: BlockNumber = leaf_index.try_into().unwrap(); + let parent_hash = >::block_hash(parent_num); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); + // but available in fork-proof storage. + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Hash(expected)) + ); + }; + verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); + verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75")); + verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b")); + }); + + // add another `frame_system::BlockHashCount` blocks and verify all nodes and leaves + // added by our original `to_canon_count` blocks have now been canonicalized in offchain db. + let block_hash_size: u64 = ::BlockHashCount::get(); + let base = to_canon_count; + for blocknum in base..(base + u32::try_from(block_hash_size).unwrap()) { + ext.execute_with(|| { + new_block(); + as Hooks>::offchain_worker(blocknum.into()); + }); + ext.persist_offchain_overlay(); + } + ext.execute_with(|| { + // verify leaves added by blocks 1..=13, should be in offchain under canon key. + for block_num in 1..=to_canon_count { + let leaf_index = u64::from(block_num - 1); + let pos = helper::leaf_index_to_pos(leaf_index.into()); + let parent_num: BlockNumber = (block_num - 1).into(); + let parent_hash = >::block_hash(parent_num); + // no longer available in fork-proof storage (was pruned), + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); + // but available using canon key. + assert_eq!( + offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), + Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))) + ); + } + + // also check some nodes and peaks: + // `pos` is node to verify, + // `leaf_index` is leaf that added node `pos`, + // `expected` is expected value of node at `pos`. 
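+ // Same checks as in phase 1, but now only the canonical keys resolve.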
+ let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { + let parent_num: BlockNumber = leaf_index.try_into().unwrap(); + let parent_hash = >::block_hash(parent_num); + // no longer available in fork-proof storage (was pruned), + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); + // but available using canon key. + assert_eq!( + offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), + Some(mmr::Node::Hash(expected)) + ); + }; + verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); + verify(13, 7, hex("441bf63abc7cf9b9e82eb57b8111c883d50ae468d9fd7f301e12269fc0fa1e75")); + verify(21, 11, hex("f323ac1a7f56de5f40ed8df3e97af74eec0ee9d72883679e49122ffad2ffd03b")); + }); +} + +#[test] +fn should_verify_canonicalized() { + use frame_support::traits::Hooks; + let _ = env_logger::try_init(); + + // How deep is our fork-aware storage (in terms of blocks/leaves, nodes will be more). + let block_hash_size: u64 = ::BlockHashCount::get(); + + // Start off with chain initialisation and storing indexing data off-chain. + // Create twice as many leaf entries than our fork-aware capacity, + // resulting in ~half of MMR storage to use canonical keys and the other half fork-aware keys. + // Verify that proofs can be generated (using leaves and nodes from full set) and verified. + let mut ext = new_test_ext(); + register_offchain_ext(&mut ext); + for blocknum in 0u32..(2 * block_hash_size).try_into().unwrap() { + ext.execute_with(|| { + new_block(); + as Hooks>::offchain_worker(blocknum.into()); + }); + ext.persist_offchain_overlay(); + } + + // Generate proofs for some blocks. + let (leaves, proofs) = + ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![0, 4, 5, 7]).unwrap()); + // Verify all previously generated proofs. + ext.execute_with(|| { + assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); + }); + + // Generate proofs for some new blocks. + let (leaves, proofs) = ext.execute_with(|| { + crate::Pallet::::generate_batch_proof(vec![block_hash_size + 7]).unwrap() + }); + // Add some more blocks then verify all previously generated proofs. + ext.execute_with(|| { + add_blocks(7); + assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); + }); +} diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 5a339d069062c..8a2e901aefddf 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -81,7 +81,7 @@ pub struct Proof { /// A full leaf content stored in the offchain-db. pub trait FullLeaf: Clone + PartialEq + fmt::Debug { - /// Encode the leaf either in it's full or compact form. + /// Encode the leaf either in its full or compact form. /// /// NOTE the encoding returned here MUST be `Decode`able into `FullLeaf`. fn using_encoded R>(&self, f: F, compact: bool) -> R; @@ -167,18 +167,18 @@ impl EncodableOpaqueLeaf { } } -/// An element representing either full data or it's hash. +/// An element representing either full data or its hash. /// /// See [Compact] to see how it may be used in practice to reduce the size /// of proofs in case multiple [LeafDataProvider]s are composed together. /// This is also used internally by the MMR to differentiate leaf nodes (data) /// and inner nodes (hashes). 
/// -/// [DataOrHash::hash] method calculates the hash of this element in it's compact form, +/// [DataOrHash::hash] method calculates the hash of this element in its compact form, /// so should be used instead of hashing the encoded form (which will always be non-compact). #[derive(RuntimeDebug, Clone, PartialEq)] pub enum DataOrHash { - /// Arbitrary data in it's full form. + /// Arbitrary data in its full form. Data(L), /// A hash of some data. Hash(H::Output), @@ -339,7 +339,7 @@ where A: FullLeaf, B: FullLeaf, { - /// Retrieve a hash of this item in it's compact form. + /// Retrieve a hash of this item in its compact form. pub fn hash(&self) -> H::Output { self.using_encoded(::hash, true) } @@ -447,7 +447,7 @@ sp_api::decl_runtime_apis! { /// Note this function does not require any on-chain storage - the /// proof is verified against given MMR root hash. /// - /// The leaf data is expected to be encoded in it's compact form. + /// The leaf data is expected to be encoded in its compact form. fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; From d871277f981d3e4d3105b9f7c4f7a5f6a1323cc5 Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 8 Jul 2022 14:27:58 +0200 Subject: [PATCH 025/135] expose grandpa BeforeBestBlockBy voting rule (#11798) --- client/finality-grandpa/src/voting_rule.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 209b0f1b33ced..051c7f2c03658 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -89,7 +89,7 @@ where /// can prioritize shorter chains over longer ones, the vote may be /// closer to the best block than N. #[derive(Clone)] -pub struct BeforeBestBlockBy(N); +pub struct BeforeBestBlockBy(pub N); impl VotingRule for BeforeBestBlockBy> where Block: BlockT, From 8310936bd25519cee81abb01d2b164805f01bc25 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 9 Jul 2022 14:39:26 +0100 Subject: [PATCH 026/135] Un-deprecate Transactional Macro (#11807) * un-deprecate transactional macro * add transactional back to nomination pools --- frame/nomination-pools/src/lib.rs | 15 ++++++++++++++- frame/support/procedural/src/lib.rs | 1 - 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index d43d43c806d61..f7ccac3e35411 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -316,7 +316,7 @@ use frame_support::{ Currency, Defensive, DefensiveOption, DefensiveResult, DefensiveSaturating, ExistenceRequirement, Get, }, - CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, + transactional, CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_core::U256; @@ -1412,6 +1412,7 @@ pub mod pallet { /// `existential deposit + amount` in their account. 
		/// * Only a pool with [`PoolState::Open`] can be joined
		#[pallet::weight(T::WeightInfo::join())]
+		#[transactional]
		pub fn join(
			origin: OriginFor,
			#[pallet::compact] amount: BalanceOf,
@@ -1472,6 +1473,7 @@ pub mod pallet {
			T::WeightInfo::bond_extra_transfer()
			.max(T::WeightInfo::bond_extra_reward())
		)]
+		#[transactional]
		pub fn bond_extra(origin: OriginFor, extra: BondExtra>) -> DispatchResult {
			let who = ensure_signed(origin)?;
			let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?;
@@ -1510,6 +1512,7 @@ pub mod pallet {
		/// The member will earn rewards pro rata based on the member's stake vs the sum of the
		/// members' stake in the pool. Rewards do not "expire".
		#[pallet::weight(T::WeightInfo::claim_payout())]
+		#[transactional]
		pub fn claim_payout(origin: OriginFor) -> DispatchResult {
			let who = ensure_signed(origin)?;
			let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?;
@@ -1549,6 +1552,7 @@ pub mod pallet {
		/// there are too many unlocking chunks, the result of this call will likely be the
		/// `NoMoreChunks` error from the staking system.
		#[pallet::weight(T::WeightInfo::unbond())]
+		#[transactional]
		pub fn unbond(
			origin: OriginFor,
			member_account: T::AccountId,
@@ -1626,6 +1630,7 @@ pub mod pallet {
		/// would probably see an error like `NoMoreChunks` emitted from the staking system when
		/// they attempt to unbond.
		#[pallet::weight(T::WeightInfo::pool_withdraw_unbonded(*num_slashing_spans))]
+		#[transactional]
		pub fn pool_withdraw_unbonded(
			origin: OriginFor,
			pool_id: PoolId,
@@ -1662,6 +1667,7 @@ pub mod pallet {
		#[pallet::weight(
			T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)
		)]
+		#[transactional]
		pub fn withdraw_unbonded(
			origin: OriginFor,
			member_account: T::AccountId,
@@ -1787,6 +1793,7 @@ pub mod pallet {
		/// In addition to `amount`, the caller will transfer the existential deposit; so the caller
		/// needs to have at least `amount + existential_deposit` transferrable.
		#[pallet::weight(T::WeightInfo::create())]
+		#[transactional]
		pub fn create(
			origin: OriginFor,
			#[pallet::compact] amount: BalanceOf,
@@ -1875,6 +1882,7 @@ pub mod pallet {
		/// This directly forwards the call to the staking pallet, on behalf of the pool bonded
		/// account.
		#[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))]
+		#[transactional]
		pub fn nominate(
			origin: OriginFor,
			pool_id: PoolId,
@@ -1891,6 +1899,7 @@ pub mod pallet {
		/// The dispatch origin of this call must be signed by the state toggler, or the root role
		/// of the pool.
		#[pallet::weight(T::WeightInfo::set_state())]
+		#[transactional]
		pub fn set_state(
			origin: OriginFor,
			pool_id: PoolId,
@@ -1921,6 +1930,7 @@ pub mod pallet {
		/// The dispatch origin of this call must be signed by the state toggler, or the root role
		/// of the pool.
		#[pallet::weight(T::WeightInfo::set_metadata(metadata.len() as u32))]
+		#[transactional]
		pub fn set_metadata(
			origin: OriginFor,
			pool_id: PoolId,
@@ -1952,6 +1962,7 @@ pub mod pallet {
		/// * `max_members` - Set [`MaxPoolMembers`].
		/// * `max_members_per_pool` - Set [`MaxPoolMembersPerPool`].
		#[pallet::weight(T::WeightInfo::set_configs())]
+		#[transactional]
		pub fn set_configs(
			origin: OriginFor,
			min_join_bond: ConfigOp>,
@@ -1988,6 +1999,7 @@ pub mod pallet {
		/// It emits an event, notifying UIs of the role change. This event is quite relevant to
		/// most pool members and they should be informed of changes to pool roles.
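+		// A rough sketch (an assumed shape, not the exact macro expansion) of
+		// what `#[transactional]` wraps around each dispatchable above: the
+		// body runs inside a storage transaction that commits on `Ok` and
+		// rolls every storage change back on `Err`, roughly:
+		//
+		//   frame_support::storage::with_transaction(|| {
+		//       let r = call_body();
+		//       if r.is_ok() {
+		//           sp_runtime::TransactionOutcome::Commit(r)
+		//       } else {
+		//           sp_runtime::TransactionOutcome::Rollback(r)
+		//       }
+		//   })
+		//
+		// where `call_body` stands in for the annotated function body.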
		#[pallet::weight(T::WeightInfo::update_roles())]
+		#[transactional]
		pub fn update_roles(
			origin: OriginFor,
			pool_id: PoolId,
@@ -2040,6 +2052,7 @@ pub mod pallet {
		/// This directly forwards the call to the staking pallet, on behalf of the pool bonded
		/// account.
		#[pallet::weight(T::WeightInfo::chill())]
+		#[transactional]
		pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult {
			let who = ensure_signed(origin)?;
			let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?;
diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs
index 418fad56b7674..00204b7a4d906 100644
--- a/frame/support/procedural/src/lib.rs
+++ b/frame/support/procedural/src/lib.rs
@@ -429,7 +429,6 @@ pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream {
 /// }
 /// ```
 #[proc_macro_attribute]
-#[deprecated(note = "This is now the default behaviour for all extrinsics.")]
 pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream {
	transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into())
 }

From d513c8b9ad373432b671a46f28b1835c1f879923 Mon Sep 17 00:00:00 2001
From: Arkadiy Paronyan
Date: Tue, 12 Jul 2022 13:14:42 +0200
Subject: [PATCH 027/135] sync: Fixed clearing subsequent ranges (#11815)

* sync: Fixed clearing subsequent ranges

* Warp sync fix

* Better documentation
---
 client/network/sync/src/blocks.rs | 48 +++++++++++++++++++++++++------
 client/network/sync/src/lib.rs    |  3 ++
 2 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs
index 8d9b039b14fba..26753f120a170 100644
--- a/client/network/sync/src/blocks.rs
+++ b/client/network/sync/src/blocks.rs
@@ -180,6 +180,9 @@ impl BlockCollection {
	/// Get a valid chain of blocks ordered in descending order and ready for importing into
	/// the blockchain.
+	/// `from` is the maximum block number for the start of the range that we are interested in.
+	/// The function will return an empty `Vec` if the first block ready is higher than `from`.
+	/// For each returned block hash, `clear_queued` must be called at some later stage.
	pub fn ready_blocks(&mut self, from: NumberFor) -> Vec> {
		let mut ready = Vec::new();

@@ -192,6 +195,10 @@ impl BlockCollection {
				BlockRangeState::Complete(blocks) => {
					let len = (blocks.len() as u32).into();
					prev = start + len;
+					if let Some(BlockData { block, .. }) = blocks.first() {
+						self.queued_blocks
+							.insert(block.hash, (start, start + (blocks.len() as u32).into()));
+					}
					// Remove all elements from `blocks` and add them to `ready`
					ready.append(blocks);
					len
@@ -201,18 +208,12 @@ impl BlockCollection {
			};
			*range_data = BlockRangeState::Queued { len };
		}
-
-		if let Some(BlockData { block, ..
}) = ready.first() { - self.queued_blocks - .insert(block.hash, (from, from + (ready.len() as u32).into())); - } - trace!(target: "sync", "{} blocks ready for import", ready.len()); ready } - pub fn clear_queued(&mut self, from_hash: &B::Hash) { - if let Some((from, to)) = self.queued_blocks.remove(from_hash) { + pub fn clear_queued(&mut self, hash: &B::Hash) { + if let Some((from, to)) = self.queued_blocks.remove(hash) { let mut block_num = from; while block_num < to { self.blocks.remove(&block_num); @@ -399,4 +400,35 @@ mod test { assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); } + + #[test] + fn clear_queued_subsequent_ranges() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer = PeerId::random(); + + let blocks = generate_blocks(10); + + // Request 2 ranges + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); + + // got a response on the request for `40..50` + bc.clear_peer_download(&peer); + bc.insert(40, blocks.to_vec(), peer.clone()); + + // request any blocks starting from 1000 or lower. + let ready = bc.ready_blocks(1000); + assert_eq!( + ready, + blocks + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) + .collect::>() + ); + + bc.clear_queued(&blocks[0].hash); + assert!(bc.blocks.is_empty()); + assert!(bc.queued_blocks.is_empty()); + } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 5b5216c745c98..1ce69b6dc816f 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1521,6 +1521,9 @@ where for (_, hash) in &results { self.queue_blocks.remove(hash); self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } } for (result, hash) in results { if has_error { From 43fd9a9dc2ba2644f88531b52a196a4f0800c2bf Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Tue, 12 Jul 2022 23:19:13 +1200 Subject: [PATCH 028/135] CLI flag to configure tx ban duration (#11786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add tx-ban-seconds * fix * trigger CI * trigger CI * remove test print * Update client/cli/src/params/transaction_pool_params.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- bin/node/cli/benches/transaction_pool.rs | 3 +++ client/cli/src/commands/run_cmd.rs | 4 ++-- client/cli/src/config.rs | 4 ++-- client/cli/src/params/transaction_pool_params.rs | 14 +++++++++++++- client/transaction-pool/src/graph/pool.rs | 5 ++++- client/transaction-pool/src/graph/rotator.rs | 5 +++++ .../transaction-pool/src/graph/validated_pool.rs | 3 ++- 7 files changed, 31 insertions(+), 7 deletions(-) diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index f1fce16d8c1b3..5b96355b9ca70 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
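+// A short sketch of the knob this benchmark now sets explicitly (the value
+// below mirrors the non-dev default; `Options` and `ban_time` are shown as
+// used later in this patch):
+//
+//   let mut options = Options::default();
+//   options.ban_time = Duration::from_secs(30 * 60);
+//
+// On the CLI the same field is fed by `--tx-ban-seconds`; dev chains default
+// it to zero so invalid transactions are not banned between retries.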
+use std::time::Duration; + use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use futures::{future, StreamExt}; use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; @@ -58,6 +60,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { ready: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, future: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, reject_future_transactions: false, + ban_time: Duration::from_secs(30 * 60), }, network: network_config, keystore: KeystoreConfig::InMemory, diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index b4a3885e39906..12774eecc4174 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -480,8 +480,8 @@ impl CliConfiguration for RunCmd { Ok(self.ws_max_out_buffer_capacity) } - fn transaction_pool(&self) -> Result { - Ok(self.pool_config.transaction_pool()) + fn transaction_pool(&self, is_dev: bool) -> Result { + Ok(self.pool_config.transaction_pool(is_dev)) } fn max_runtime_instances(&self) -> Result> { diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 6e1317c11fbc4..01c541bf75255 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -145,7 +145,7 @@ pub trait CliConfiguration: Sized { /// Get the transaction pool options /// /// By default this is `TransactionPoolOptions::default()`. - fn transaction_pool(&self) -> Result { + fn transaction_pool(&self, _is_dev: bool) -> Result { Ok(Default::default()) } @@ -523,7 +523,7 @@ pub trait CliConfiguration: Sized { impl_name: C::impl_name(), impl_version: C::impl_version(), tokio_handle, - transaction_pool: self.transaction_pool()?, + transaction_pool: self.transaction_pool(is_dev)?, network: self.network_config( &chain_spec, is_dev, diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index efb78430ced55..6429dfec3f908 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -29,11 +29,15 @@ pub struct TransactionPoolParams { /// Maximum number of kilobytes of all transactions stored in the pool. #[clap(long, value_name = "COUNT", default_value = "20480")] pub pool_kbytes: usize, + + /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. + #[clap(long, value_name = "SECONDS")] + pub tx_ban_seconds: Option, } impl TransactionPoolParams { /// Fill the given `PoolConfiguration` by looking at the cli parameters. - pub fn transaction_pool(&self) -> TransactionPoolOptions { + pub fn transaction_pool(&self, is_dev: bool) -> TransactionPoolOptions { let mut opts = TransactionPoolOptions::default(); // ready queue @@ -45,6 +49,14 @@ impl TransactionPoolParams { opts.future.count = self.pool_limit / factor; opts.future.total_bytes = self.pool_kbytes * 1024 / factor; + opts.ban_time = if let Some(ban_seconds) = self.tx_ban_seconds { + std::time::Duration::from_secs(ban_seconds) + } else if is_dev { + std::time::Duration::from_secs(0) + } else { + std::time::Duration::from_secs(30 * 60) + }; + opts } } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 618ba8ccf24d5..4ce7954f8d479 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. 
If not, see . -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use futures::{channel::mpsc::Receiver, Future}; use sc_transaction_pool_api::error; @@ -108,6 +108,8 @@ pub struct Options { pub future: base::Limit, /// Reject future transactions. pub reject_future_transactions: bool, + /// How long the extrinsic is banned for. + pub ban_time: Duration, } impl Default for Options { @@ -116,6 +118,7 @@ impl Default for Options { ready: base::Limit { count: 8192, total_bytes: 20 * 1024 * 1024 }, future: base::Limit { count: 512, total_bytes: 1 * 1024 * 1024 }, reject_future_transactions: false, + ban_time: Duration::from_secs(60 * 30), } } } diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index b897fe7885033..c91c8e407bc0f 100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -51,6 +51,11 @@ impl Default for PoolRotator { } impl PoolRotator { + /// New rotator instance with specified ban time. + pub fn new(ban_time: Duration) -> Self { + Self { ban_time, banned_until: Default::default() } + } + /// Returns `true` if extrinsic hash is currently banned. pub fn is_banned(&self, hash: &Hash) -> bool { self.banned_until.read().contains_key(hash) diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index 084b04842ee90..e59c5afbdc512 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -125,6 +125,7 @@ impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { let base_pool = base::BasePool::new(options.reject_future_transactions); + let ban_time = options.ban_time; Self { is_validator, options, @@ -132,7 +133,7 @@ impl ValidatedPool { api, pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), - rotator: Default::default(), + rotator: PoolRotator::new(ban_time), } } From 5677b91eb6ad8e280cf7dcbdb8990d31bd2d74b1 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 12 Jul 2022 21:00:00 +0400 Subject: [PATCH 029/135] Upgrade libp2p to 0.46.1 (#11787) * Update libp2p to 0.46.0 * Update libp2p to 0.46.1 * Fix telemetry initialization * Fix tests --- Cargo.lock | 362 ++++++------------ client/authority-discovery/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/common/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/light/Cargo.toml | 2 +- client/network/src/discovery.rs | 6 +- client/network/src/peer_info.rs | 5 +- client/network/src/protocol.rs | 5 +- .../src/protocol/notifications/tests.rs | 6 +- client/network/src/request_responses.rs | 7 +- client/network/src/transport.rs | 9 +- client/network/sync/Cargo.toml | 2 +- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- client/telemetry/src/transport.rs | 3 +- 19 files changed, 151 insertions(+), 274 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d68330cfb15c2..26f43fa4ae0ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3207,7 +3207,7 @@ dependencies = [ "http", "jsonrpsee-core", "jsonrpsee-types", - "pin-project 1.0.10", + "pin-project", "rustls-native-certs", "soketto", "thiserror", @@ -3461,9 +3461,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" 
[[package]] name = "libp2p" -version = "0.45.1" +version = "0.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +checksum = "81327106887e42d004fbdab1fef93675be2e2e07c1b95fce45e2cc813485611d" dependencies = [ "bytes", "futures", @@ -3472,7 +3472,7 @@ dependencies = [ "instant", "lazy_static", "libp2p-autonat", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -3498,69 +3498,35 @@ dependencies = [ "libp2p-yamux", "multiaddr", "parking_lot 0.12.0", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "smallvec", ] [[package]] name = "libp2p-autonat" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50de7c1d5c3f040fccb469e8a2d189e068b7627d760dd74ef914071c16bbe905" +checksum = "4decc51f3573653a9f4ecacb31b1b922dd20c25a6322bb15318ec04287ec46f9" dependencies = [ "async-trait", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-request-response", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.8.4", -] - -[[package]] -name = "libp2p-core" -version = "0.32.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db5b02602099fb75cb2d16f9ea860a320d6eb82ce41e95ab680912c454805cd5" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "lazy_static", - "log", - "multiaddr", - "multihash", - "multistream-select", - "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.8.4", - "ring", - "rw-stream-sink 0.2.1", - "sha2 0.10.2", - "smallvec", - "thiserror", - "unsigned-varint", - "void", - "zeroize", ] [[package]] name = "libp2p-core" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +checksum = "fbf9b94cefab7599b2d3dff2f93bee218c6621d68590b23ede4485813cbcece6" dependencies = [ "asn1_der", "bs58", @@ -3577,12 +3543,12 @@ dependencies = [ "multihash", "multistream-select", "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "pin-project", + "prost", + "prost-build", "rand 0.8.4", "ring", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "sha2 0.10.2", "smallvec", "thiserror", @@ -3593,24 +3559,24 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86adefc55ea4ed8201149f052fb441210727481dff1fb0b8318460206a79f5fb" +checksum = "d0183dc2a3da1fbbf85e5b6cf51217f55b14f5daea0c455a9536eef646bfec71" dependencies = [ "flate2", "futures", - "libp2p-core 0.33.0", + "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +checksum = "6cbf54723250fa5d521383be789bf60efdabe6bacfb443f87da261019a49b4b5" dependencies = [ "async-std-resolver", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "log", "parking_lot 0.12.0", "smallvec", @@ -3619,27 +3585,27 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a505d0c6f851cbf2919535150198e530825def8bd3757477f13dc3a57f46cbcc" +checksum = "98a4b6ffd53e355775d24b76f583fdda54b3284806f678499b57913adb94f231" dependencies = [ "cuckoofilter", "fnv", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "smallvec", ] [[package]] name = "libp2p-gossipsub" -version = "0.38.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be947d8cea8e6b469201314619395826896d2c051053c3723910ba98e68e04" +checksum = "74b4b888cfbeb1f5551acd3aa1366e01bf88ede26cc3c4645d0d2d004d5ca7b0" dependencies = [ "asynchronous-codec", "base64", @@ -3649,12 +3615,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "prometheus-client", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3665,19 +3631,19 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +checksum = "c50b585518f8efd06f93ac2f976bd672e17cdac794644b3117edd078e96bda06" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "lru", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3686,9 +3652,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.37.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6b5d4de90fcd35feb65ea6223fd78f3b747a64ca4b65e0813fbe66a27d56aa" +checksum = "740862893bb5f06ac24acc9d49bdeadc3a5e52e51818a30a25c1f3519da2c851" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -3698,11 +3664,11 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.7.3", "sha2 0.10.2", "smallvec", @@ -3714,9 +3680,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783f8cf00c7b6c1ff0f1870b4fcf50b042b45533d2e13b6fb464caf447a6951" +checksum = "66e5e5919509603281033fd16306c61df7a4428ce274b67af5e14b07de5cdcb2" dependencies = [ "async-io", "data-encoding", @@ -3724,7 +3690,7 @@ dependencies = [ "futures", "if-watch", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.8.4", @@ -3735,11 +3701,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" +checksum = "ef8aff4a1abef42328fbb30b17c853fff9be986dc39af17ee39f9c5f755c5e0c" dependencies = [ - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", @@ -3751,14 +3717,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +checksum = "61fd1b20638ec209c5075dfb2e8ce6a7ea4ec3cd3ad7b77f7a477c06d53322e2" dependencies = [ 
"asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "log", "nohash-hasher", "parking_lot 0.12.0", @@ -3769,18 +3735,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +checksum = "762408cb5d84b49a600422d7f9a42c18012d8da6ebcd570f9a4a4290ba41fb6f" dependencies = [ "bytes", "curve25519-dalek 3.0.2", "futures", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.4", "sha2 0.10.2", "snow", @@ -3791,14 +3757,14 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41516c82fe8dd148ec925eead0c5ec08a0628f7913597e93e126e4dfb4e0787" +checksum = "100a6934ae1dbf8a693a4e7dd1d730fd60b774dafc45688ed63b554497c6c925" dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.7.3", @@ -3807,17 +3773,17 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +checksum = "be27bf0820a6238a4e06365b096d428271cce85a129cf16f2fe9eb1610c4df86" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "unsigned-varint", "void", ] @@ -3830,7 +3796,7 @@ checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ "futures", "log", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "salsa20", "sha3 0.9.1", @@ -3838,9 +3804,9 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624ead3406f64437a0d4567c31bd128a9a0b8226d5f16c074038f5d0fc32f650" +checksum = "4931547ee0cce03971ccc1733ff05bb0c4349fd89120a39e9861e2bbe18843c3" dependencies = [ "asynchronous-codec", "bytes", @@ -3848,12 +3814,12 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "pin-project", + "prost", + "prost-build", "prost-codec", "rand 0.8.4", "smallvec", @@ -3864,20 +3830,20 @@ dependencies = [ [[package]] name = "libp2p-rendezvous" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59967ea2db2c7560f641aa58ac05982d42131863fcd3dd6dcf0dd1daf81c60c" +checksum = "9511c9672ba33284838e349623319c8cad2d18cfad243ae46c6b7e8a2982ea4e" dependencies = [ "asynchronous-codec", "bimap", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.4", "sha2 0.10.2", "thiserror", @@ -3887,15 +3853,15 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02e0acb725e5a757d77c96b95298fd73a7394fe82ba7b8bbeea510719cbe441" +checksum = 
"508a189e2795d892c8f5c1fa1e9e0b1845d32d7b0b249dbf7b05b18811361843" dependencies = [ "async-trait", "bytes", "futures", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "libp2p-swarm", "log", "rand 0.7.3", @@ -3905,18 +3871,18 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.36.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +checksum = "95ac5be6c2de2d1ff3f7693fda6faf8a827b1f3e808202277783fea9f527d114" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core", "log", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "smallvec", "thiserror", @@ -3925,9 +3891,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf2fe8c80b43561355f4d51875273b5b6dfbac37952e8f64b1270769305c9d7" +checksum = "9f54a64b6957249e0ce782f8abf41d97f69330d02bf229f0672d864f0650cc76" dependencies = [ "quote", "syn", @@ -3935,9 +3901,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +checksum = "8a6771dc19aa3c65d6af9a8c65222bfc8fcd446630ddca487acd161fa6096f3b" dependencies = [ "async-io", "futures", @@ -3945,32 +3911,32 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.33.0", + "libp2p-core", "log", "socket2 0.4.4", ] [[package]] name = "libp2p-uds" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24bdab114f7f2701757d6541266e1131b429bbae382008f207f2114ee4222dcb" +checksum = "d125e3e5f0d58f3c6ac21815b20cf4b6a88b8db9dc26368ea821838f4161fd4d" dependencies = [ "async-std", "futures", - "libp2p-core 0.32.1", + "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f066f2b8b1a1d64793f05da2256e6842ecd0293d6735ca2e9bda89831a1bdc06" +checksum = "ec894790eec3c1608f8d1a8a0bdf0dbeb79ed4de2dce964222011c2896dfa05a" dependencies = [ "futures", "js-sys", - "libp2p-core 0.33.0", + "libp2p-core", "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", @@ -3978,18 +3944,18 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +checksum = "9808e57e81be76ff841c106b4c5974fb4d41a233a7bdd2afbf1687ac6def3818" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.33.0", + "libp2p-core", "log", "parking_lot 0.12.0", "quicksink", - "rw-stream-sink 0.3.0", + "rw-stream-sink", "soketto", "url", "webpki-roots", @@ -3997,12 +3963,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +checksum = "c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core", "parking_lot 0.12.0", "thiserror", "yamux", @@ -4462,7 +4428,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 
1.0.10", + "pin-project", "smallvec", "unsigned-varint", ] @@ -6836,33 +6802,13 @@ dependencies = [ "indexmap", ] -[[package]] -name = "pin-project" -version = "0.4.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" -dependencies = [ - "pin-project-internal 0.4.29", -] - [[package]] name = "pin-project" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 1.0.10", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] @@ -7114,16 +7060,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes", - "prost-derive 0.9.0", -] - [[package]] name = "prost" version = "0.10.3" @@ -7131,27 +7067,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" dependencies = [ "bytes", - "prost-derive 0.10.1", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes", - "heck 0.3.2", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost 0.9.0", - "prost-types 0.9.0", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -7169,8 +7085,8 @@ dependencies = [ "log", "multimap", "petgraph", - "prost 0.10.3", - "prost-types 0.10.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -7184,24 +7100,11 @@ checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.3", + "prost", "thiserror", "unsigned-varint", ] -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.10.1" @@ -7215,16 +7118,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost 0.9.0", -] - [[package]] name = "prost-types" version = "0.10.1" @@ -7232,7 +7125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", - "prost 0.10.3", + "prost", ] [[package]] @@ -7922,17 +7815,6 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" -[[package]] -name = "rw-stream-sink" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" -dependencies = [ - "futures", - "pin-project 0.4.29", - "static_assertions", -] - [[package]] name = "rw-stream-sink" version = "0.3.0" @@ -7940,7 +7822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.10", + "pin-project", "static_assertions", ] @@ -7998,8 +7880,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8651,9 +8533,9 @@ dependencies = [ "lru", "parity-scale-codec", "parking_lot 0.12.0", - "pin-project 1.0.10", - "prost 0.10.3", - "prost-build 0.10.4", + "pin-project", + "prost", + "prost-build", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8691,7 +8573,7 @@ dependencies = [ "futures", "libp2p", "parity-scale-codec", - "prost-build 0.10.4", + "prost-build", "sc-peerset", "smallvec", ] @@ -8723,8 +8605,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8746,8 +8628,8 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "quickcheck", "sc-block-builder", "sc-client-api", @@ -8954,7 +8836,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parking_lot 0.12.0", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -9099,7 +8981,7 @@ dependencies = [ "libp2p", "log", "parking_lot 0.12.0", - "pin-project 1.0.10", + "pin-project", "rand 0.7.3", "serde", "serde_json", @@ -11189,7 +11071,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.10", + "pin-project", "tracing", ] diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 32a5e23bd49d9..012e6096aab80 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.45.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" prost = "0.10" rand = "0.7.2" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 72b4efde077ff..ea60e4c9f87e5 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ clap = { version = "3.1.18", features = ["derive"] } fdlimit = "0.2.1" futures = "0.3.21" hex = "0.4.2" -libp2p = "0.45.1" +libp2p = "0.46.1" log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 6d76eba0935ff..12b630f36b89b 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.42" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } 
log = "0.4.17" parking_lot = "0.12.0" serde = { version = "1.0", features = ["derive"] } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 0fac96b0fdde0..144c5ad168996 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = "0.7.6" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" lru = "0.7.5" tracing = "0.1.29" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 1b525844395b5..dfd2db2e6b844 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -29,7 +29,7 @@ futures = "0.3.21" futures-timer = "3.0.2" hex = "0.4.0" ip_network = "0.4.1" -libp2p = "0.45.1" +libp2p = "0.46.1" linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 553eb57958a5d..e69787d7aff77 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -21,6 +21,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.45.1" +libp2p = "0.46.1" smallvec = "1.8.0" sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index 924b8ed06ab0a..0037177fb4046 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.45.1" +libp2p = "0.46.1" log = "0.4.16" prost = "0.10" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 2bae2fb807f7e..f3d1588c0280e 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -52,8 +52,8 @@ use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::{ core::{ - connection::{ConnectionId, ListenerId}, - ConnectedPoint, Multiaddr, PeerId, PublicKey, + connection::ConnectionId, transport::ListenerId, ConnectedPoint, Multiaddr, PeerId, + PublicKey, }, kad::{ handler::KademliaHandlerProto, @@ -1030,7 +1030,7 @@ mod tests { let noise_keys = noise::Keypair::::new().into_authentic(&keypair).unwrap(); - let transport = MemoryTransport + let transport = MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) .multiplex(yamux::YamuxConfig::default()) diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index c3e79437bdd06..d668cb25ea455 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -21,9 +21,8 @@ use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ core::{ - connection::{ConnectionId, ListenerId}, - either::EitherOutput, - ConnectedPoint, PeerId, PublicKey, + connection::ConnectionId, either::EitherOutput, transport::ListenerId, ConnectedPoint, + PeerId, PublicKey, }, identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}, ping::{Ping, PingConfig, PingEvent, PingSuccess}, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 387a7b3fdde90..348d2d0bf8877 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -27,10 +27,7 @@ use 
bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, prelude::*}; use libp2p::{ - core::{ - connection::{ConnectionId, ListenerId}, - ConnectedPoint, - }, + core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint}, request_response::OutboundFailure, swarm::{ ConnectionHandler, IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index da12dbf216c4c..fa79366d20283 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -23,8 +23,8 @@ use crate::protocol::notifications::{Notifications, NotificationsOut, ProtocolCo use futures::prelude::*; use libp2p::{ core::{ - connection::{ConnectionId, ListenerId}, - transport::MemoryTransport, + connection::ConnectionId, + transport::{ListenerId, MemoryTransport}, upgrade, ConnectedPoint, }, identity, noise, @@ -56,7 +56,7 @@ fn build_nodes() -> (Swarm, Swarm) { let noise_keys = noise::Keypair::::new().into_authentic(&keypair).unwrap(); - let transport = MemoryTransport + let transport = MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) .multiplex(yamux::YamuxConfig::default()) diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 6c7b0f3fcdfc8..cec4aa2a07fba 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -40,10 +40,7 @@ use futures::{ prelude::*, }; use libp2p::{ - core::{ - connection::{ConnectionId, ListenerId}, - ConnectedPoint, Multiaddr, PeerId, - }, + core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint, Multiaddr, PeerId}, request_response::{ handler::RequestResponseHandler, ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent, RequestResponseMessage, ResponseChannel, @@ -968,7 +965,7 @@ mod tests { let noise_keys = noise::Keypair::::new().into_authentic(&keypair).unwrap(); - let transport = MemoryTransport + let transport = MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) .multiplex(libp2p::yamux::YamuxConfig::default()) diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 883f828e7db51..23645b11795c3 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -54,16 +54,17 @@ pub fn build_transport( ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. 
let transport = if !memory_only { - let desktop_trans = tcp::TcpConfig::new().nodelay(true); + let tcp_config = tcp::GenTcpConfig::new().nodelay(true); + let desktop_trans = tcp::TcpTransport::new(tcp_config.clone()); let desktop_trans = websocket::WsConfig::new(desktop_trans) - .or_transport(tcp::TcpConfig::new().nodelay(true)); + .or_transport(tcp::TcpTransport::new(tcp_config.clone())); let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans)); EitherTransport::Left(if let Ok(dns) = dns_init { EitherTransport::Left(dns) } else { - let desktop_trans = tcp::TcpConfig::new().nodelay(true); + let desktop_trans = tcp::TcpTransport::new(tcp_config.clone()); let desktop_trans = websocket::WsConfig::new(desktop_trans) - .or_transport(tcp::TcpConfig::new().nodelay(true)); + .or_transport(tcp::TcpTransport::new(tcp_config)); EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport)) }) } else { diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 9185e812bc6cd..a61f780fd56ad 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ ] } either = "1.5.3" futures = "0.3.21" -libp2p = "0.45.1" +libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" prost = "0.10" diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 2af1dd8cb9b54..fdb9befc41cb1 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -17,7 +17,7 @@ async-std = "1.11.0" async-trait = "0.1.50" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" parking_lot = "0.12.0" rand = "0.7.2" diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index f150c728613ba..510ebf7006155 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -libp2p = { version = "0.45.1", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" serde_json = "1.0.79" wasm-timer = "0.2" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 9682dc824f930..0d966dacfc2c8 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.19" futures = "0.3.21" -libp2p = { version = "0.45.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" parking_lot = "0.12.0" pin-project = "1.0.10" diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs index e21a2380be255..d64da44a83b6b 100644 --- a/client/telemetry/src/transport.rs +++ b/client/telemetry/src/transport.rs @@ -31,7 +31,8 @@ const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); pub(crate) fn initialize_transport() -> Result { let transport = { - let inner = block_on(libp2p::dns::DnsConfig::system(libp2p::tcp::TcpConfig::new()))?; + let tcp_transport = libp2p::tcp::TcpTransport::new(libp2p::tcp::GenTcpConfig::new()); + let inner = block_on(libp2p::dns::DnsConfig::system(tcp_transport))?; libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { let connec = connec 
.with(|item| { From 3c48c61ac6a088901668820a825e2d5823bdea82 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 12 Jul 2022 18:08:54 +0100 Subject: [PATCH 030/135] prep council election pallet for being dissolved (#11790) * prep council election pallet for being dissolved + make it temporarily bounded * fix tests * fix * Update frame/elections-phragmen/src/lib.rs * fix bench? --- Cargo.lock | 1 + frame/elections-phragmen/Cargo.toml | 1 + frame/elections-phragmen/src/benchmarking.rs | 86 ++----------- frame/elections-phragmen/src/lib.rs | 129 ++++++++----------- 4 files changed, 66 insertions(+), 151 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26f43fa4ae0ba..f98d98a3c571f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5673,6 +5673,7 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-std", + "sp-tracing", "substrate-test-utils", ] diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 0f195f12cce3c..d71a74f76a114 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -30,6 +30,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-tracing = { path = "../../primitives/tracing" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 44dd6aff09f0c..9a9d448449eca 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -22,17 +22,12 @@ use super::*; use frame_benchmarking::{account, benchmarks, whitelist, BenchmarkError, BenchmarkResult}; -use frame_support::{ - dispatch::{DispatchResultWithPostInfo, UnfilteredDispatchable}, - traits::OnInitialize, -}; +use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; -const MAX_VOTERS: u32 = 500; -const MAX_CANDIDATES: u32 = 200; type Lookup = <::Lookup as StaticLookup>::Source; @@ -345,38 +340,6 @@ benchmarks! { ))?; } - // -- Root ones - #[extra] // this calls into phragmen and consumes a full block for now. - remove_member_without_replacement_extra { - // worse case is when we remove a member and we have no runner as a replacement. This - // triggers phragmen again. The only parameter is how many candidates will compete for the - // new slot. - let c in 1 .. MAX_CANDIDATES; - clean::(); - - // fill only desired members. no runners-up. - let all_members = fill_seats_up_to::(T::DesiredMembers::get())?; - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - - // submit a new one to compensate, with self-vote. - let replacements = submit_candidates_with_self_vote::(c, "new_candidate")?; - - // create some voters for these replacements. - distribute_voters::(replacements, MAX_VOTERS, MAXIMUM_VOTE)?; - - let to_remove = as_lookup::(all_members[0].clone()); - }: remove_member(RawOrigin::Root, to_remove, false) - verify { - // must still have the desired number of members members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - #[cfg(test)] - { - // reset members in between benchmark tests. 
- use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - remove_member_with_replacement { // easy case. We have a runner up. Nothing will have that much of an impact. m will be // number of members and runners. There is always at least one runner. @@ -385,7 +348,7 @@ benchmarks! { let _ = fill_seats_up_to::(m)?; let removing = as_lookup::(>::members_ids()[0].clone()); - }: remove_member(RawOrigin::Root, removing, true) + }: remove_member(RawOrigin::Root, removing, true, false) verify { // must still have enough members. assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); @@ -397,39 +360,6 @@ benchmarks! { } } - remove_member_wrong_refund { - // The root call by mistake indicated that this will have no replacement, while it has! - // this has now consumed a lot of weight and need to refund. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - clean::(); - - let _ = fill_seats_up_to::(m)?; - let removing = as_lookup::(>::members_ids()[0].clone()); - let who = T::Lookup::lookup(removing.clone()).expect("member was added above"); - let call = Call::::remove_member { who: removing, has_replacement: false }.encode(); - }: { - assert_eq!( - as Decode>::decode(&mut &*call) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(RawOrigin::Root.into()) - .unwrap_err() - .error, - Error::::InvalidReplacement.into(), - ); - } - verify { - // must still have enough members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); - // on fail, `who` must still be a member - assert!(>::members_ids().contains(&who)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - clean_defunct_voters { // total number of voters. let v in (MAX_VOTERS / 2) .. MAX_VOTERS; @@ -439,7 +369,7 @@ benchmarks! { // remove any previous stuff. clean::(); - let all_candidates = submit_candidates::(v, "candidates")?; + let all_candidates = submit_candidates::(MAX_CANDIDATES, "candidates")?; distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; // all candidates leave. @@ -474,7 +404,7 @@ benchmarks! { let votes_per_voter = (e / v).min(MAXIMUM_VOTE as u32); let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; + let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; }: { >::on_initialize(T::TermDuration::get()); } @@ -503,7 +433,11 @@ benchmarks! { let votes_per_voter = e / fixed_v; let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, fixed_v, votes_per_voter as usize)?; + let _ = distribute_voters::( + all_candidates, + fixed_v - c, + votes_per_voter as usize, + )?; }: { >::on_initialize(T::TermDuration::get()); } @@ -525,7 +459,7 @@ benchmarks! { #[extra] election_phragmen_v { let v in 4 .. 
16; - let fixed_c = MAX_CANDIDATES; + let fixed_c = MAX_CANDIDATES / 10; let fixed_e = 64; clean::(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index ec2234cde5a6e..28fed28f18e5c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -100,7 +100,6 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::WithPostDispatchInfo, traits::{ defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, @@ -126,6 +125,17 @@ pub mod migrations; /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; +// Some safe temp values to make the wasm execution sane while we still use this pallet. +#[cfg(test)] +pub(crate) const MAX_CANDIDATES: u32 = 100; +#[cfg(not(test))] +pub(crate) const MAX_CANDIDATES: u32 = 1000; + +#[cfg(test)] +pub(crate) const MAX_VOTERS: u32 = 1000; +#[cfg(not(test))] +pub(crate) const MAX_VOTERS: u32 = 10 * 1000; + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency< @@ -385,8 +395,9 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let actual_count = >::decode_len().unwrap_or(0); - ensure!(actual_count as u32 <= candidate_count, Error::::InvalidWitnessData); + let actual_count = >::decode_len().unwrap_or(0) as u32; + ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); + ensure!(actual_count <= MAX_CANDIDATES, Error::::TooManyCandidates); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -469,7 +480,11 @@ pub mod pallet { /// the outgoing member is slashed. /// /// If a runner-up is available, then the best runner-up will be removed and replaces the - /// outgoing member. Otherwise, a new phragmen election is started. + /// outgoing member. Otherwise, if `rerun_election` is `true`, a new phragmen election is + /// started, else, nothing happens. + /// + /// If `slash_bond` is set to true, the bond of the member being removed is slashed. Else, + /// it is returned. /// /// The dispatch origin of this call must be root. /// @@ -479,33 +494,24 @@ pub mod pallet { /// If we have a replacement, we use a small weight. Else, since this is a root call and /// will go into phragmen, we assume full block for now. /// # - #[pallet::weight(if *has_replacement { - T::WeightInfo::remove_member_with_replacement() - } else { + #[pallet::weight(if *rerun_election { T::WeightInfo::remove_member_without_replacement() + } else { + T::WeightInfo::remove_member_with_replacement() })] pub fn remove_member( origin: OriginFor, who: ::Source, - has_replacement: bool, + slash_bond: bool, + rerun_election: bool, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; - let will_have_replacement = >::decode_len().map_or(false, |l| l > 0); - if will_have_replacement != has_replacement { - // In both cases, we will change more weight than need. Refund and abort. - return Err(Error::::InvalidReplacement.with_weight( - // refund. The weight value comes from a benchmark which is special to this. 
- T::WeightInfo::remove_member_wrong_refund(), - )) - } - - let had_replacement = Self::remove_and_replace_member(&who, true)?; - debug_assert_eq!(has_replacement, had_replacement); + let _ = Self::remove_and_replace_member(&who, slash_bond)?; Self::deposit_event(Event::MemberKicked { member: who }); - if !had_replacement { + if rerun_election { Self::do_phragmen(); } @@ -585,10 +591,10 @@ pub mod pallet { UnableToPayBond, /// Must be a voter. MustBeVoter, - /// Cannot report self. - ReportSelf, /// Duplicated candidate submission. DuplicatedCandidate, + /// Too many candidates have been created. + TooManyCandidates, /// Member cannot re-submit candidacy. MemberSubmit, /// Runner cannot re-submit candidacy. @@ -893,7 +899,7 @@ impl Pallet { if candidates_and_deposit.len().is_zero() { Self::deposit_event(Event::EmptyTerm); - return T::DbWeight::get().reads(5) + return T::DbWeight::get().reads(3) } // All of the new winners that come out of phragmen will thus have a deposit recorded. @@ -906,10 +912,28 @@ impl Pallet { let to_balance = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); let mut num_edges: u32 = 0; + // used for prime election. - let voters_and_stakes = Voting::::iter() - .map(|(voter, Voter { stake, votes, .. })| (voter, stake, votes)) - .collect::>(); + let mut voters_and_stakes = Vec::new(); + match Voting::::iter().try_for_each(|(voter, Voter { stake, votes, .. })| { + if voters_and_stakes.len() < MAX_VOTERS as usize { + voters_and_stakes.push((voter, stake, votes)); + Ok(()) + } else { + Err(()) + } + }) { + Ok(_) => (), + Err(_) => { + log::error!( + target: "runtime::elections-phragmen", + "Failed to run election. Number of voters exceeded", + ); + Self::deposit_event(Event::ElectionError); + return T::DbWeight::get().reads(3 + MAX_VOTERS as u64) + }, + } + // used for phragmen. let voters_and_votes = voters_and_stakes .iter() @@ -1137,7 +1161,7 @@ mod tests { use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, ModuleError, + BuildStorage, }; use substrate_test_utils::assert_eq_uvec; @@ -1321,6 +1345,7 @@ mod tests { self } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + sp_tracing::try_init_simple(); MEMBERS.with(|m| { *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>() @@ -2494,7 +2519,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_member(Origin::root(), 4, false)); + assert_ok!(Elections::remove_member(Origin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round @@ -2502,52 +2527,6 @@ mod tests { }); } - #[test] - fn remove_member_should_indicate_replacement() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4, 5]); - - // no replacement yet. - let unwrapped_error = Elections::remove_member(Origin::root(), 4, true).unwrap_err(); - assert!(matches!( - unwrapped_error.error, - DispatchError::Module(ModuleError { message: Some("InvalidReplacement"), .. 
}) - )); - assert!(unwrapped_error.post_info.actual_weight.is_some()); - }); - - ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(runners_up_ids(), vec![3]); - - // there is a replacement! and this one needs a weight refund. - let unwrapped_error = Elections::remove_member(Origin::root(), 4, false).unwrap_err(); - assert!(matches!( - unwrapped_error.error, - DispatchError::Module(ModuleError { message: Some("InvalidReplacement"), .. }) - )); - assert!(unwrapped_error.post_info.actual_weight.is_some()); - }); - } - #[test] fn seats_should_be_released_when_no_vote() { ExtBuilder::default().build_and_execute(|| { @@ -2684,7 +2663,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::root(), 2, true)); + assert_ok!(Elections::remove_member(Origin::root(), 2, true, false)); assert_eq!(members_ids(), vec![4, 5]); }); } From b0777b4c7f7d8d3c635319d0153953d0a22aa58e Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 12 Jul 2022 23:34:17 +0300 Subject: [PATCH 031/135] Network sync refactoring (part 4) (#11412) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove direct dependency of `sc-network` on `sc-network-light` * Move `WarpSyncProvider` trait and surrounding data structures into `sc-network-common` * Move `WarpSyncProvider` trait and surrounding data structures into `sc-network-common` * Create `sync` module in `sc-network-common`, create `ChainSync` trait there (not used yet), move a bunch of associated data structures from `sc-network-sync` * Switch from concrete implementation to `ChainSync` trait from `sc-network-common` * Introduce `OpaqueStateRequest`/`OpaqueStateResponse` to remove generics from `StateSync` trait * Introduce `OpaqueBlockRequest`/`OpaqueBlockResponse`, make `scheme` module of `sc-network-sync` private * Surface `sc-network-sync` into `sc-service` and make `sc-network` not depend on it anymore * Remove now unnecessary dependency from `sc-network` * Replace crate links with just text since dependencies are gone now * Remove `warp_sync` re-export from `sc-network-common` * Update copyright in network-related files * Address review comments about documentation * Apply review suggestion * Rename `extra_requests` module to `metrics` Co-authored-by: Bastian Köcher --- Cargo.lock | 13 +- client/beefy/src/tests.rs | 14 +- client/consensus/aura/src/lib.rs | 14 +- client/consensus/babe/src/tests.rs | 15 +- client/finality-grandpa/Cargo.toml | 1 + client/finality-grandpa/src/tests.rs | 20 +- client/finality-grandpa/src/warp_proof.rs | 2 +- client/network/Cargo.toml | 5 +- client/network/common/Cargo.toml | 5 + client/network/common/src/lib.rs | 1 + client/network/common/src/sync.rs | 394 ++++++ .../{sync/src => common/src/sync}/message.rs | 2 +- client/network/common/src/sync/metrics.rs | 25 + client/network/common/src/sync/warp.rs | 94 ++ client/network/src/behaviour.rs | 68 +- client/network/src/bitswap.rs | 5 +- client/network/src/config.rs | 33 +- 
client/network/src/lib.rs | 8 +- client/network/src/protocol.rs | 310 ++--- client/network/src/protocol/message.rs | 10 +- client/network/src/service.rs | 27 +- client/network/src/service/tests.rs | 24 +- client/network/sync/Cargo.toml | 2 - .../network/sync/src/block_request_handler.rs | 6 +- client/network/sync/src/blocks.rs | 4 +- client/network/sync/src/extra_requests.rs | 11 +- client/network/sync/src/lib.rs | 1112 ++++++++--------- client/network/sync/src/schema.rs | 2 +- client/network/sync/src/state.rs | 10 +- client/network/sync/src/warp.rs | 45 +- .../network/sync/src/warp_request_handler.rs | 47 +- client/network/test/Cargo.toml | 2 + client/network/test/src/lib.rs | 110 +- client/service/Cargo.toml | 4 +- client/service/src/builder.rs | 28 +- 35 files changed, 1359 insertions(+), 1114 deletions(-) create mode 100644 client/network/common/src/sync.rs rename client/network/{sync/src => common/src/sync}/message.rs (99%) create mode 100644 client/network/common/src/sync/metrics.rs create mode 100644 client/network/common/src/sync/warp.rs diff --git a/Cargo.lock b/Cargo.lock index f98d98a3c571f..3b7c315be4390 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8429,6 +8429,7 @@ dependencies = [ "sc-consensus", "sc-keystore", "sc-network", + "sc-network-common", "sc-network-gossip", "sc-network-test", "sc-telemetry", @@ -8553,7 +8554,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-finality-grandpa", "sp-runtime", "sp-test-primitives", "sp-tracing", @@ -8571,12 +8571,17 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ + "bitflags", "futures", "libp2p", "parity-scale-codec", "prost-build", + "sc-consensus", "sc-peerset", "smallvec", + "sp-consensus", + "sp-finality-grandpa", + "sp-runtime", ] [[package]] @@ -8621,8 +8626,6 @@ dependencies = [ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ - "bitflags", - "either", "fork-tree", "futures", "libp2p", @@ -8667,6 +8670,8 @@ dependencies = [ "sc-consensus", "sc-network", "sc-network-common", + "sc-network-light", + "sc-network-sync", "sc-service", "sp-blockchain", "sp-consensus", @@ -8849,6 +8854,8 @@ dependencies = [ "sc-keystore", "sc-network", "sc-network-common", + "sc-network-light", + "sc-network-sync", "sc-offchain", "sc-rpc", "sc-rpc-server", diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index b5ff27c808908..8090c425e71db 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -28,7 +28,6 @@ use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; use sc_consensus::BoxJustificationImport; use sc_keystore::LocalKeystore; -use sc_network::config::ProtocolConfig; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, TestNetFactory, @@ -111,6 +110,7 @@ pub(crate) struct PeerData { pub(crate) beefy_link_half: Mutex>, } +#[derive(Default)] pub(crate) struct BeefyTestNet { peers: Vec, } @@ -166,17 +166,7 @@ impl TestNetFactory for BeefyTestNet { type BlockImport = PeersClient; type PeerData = PeerData; - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - BeefyTestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - _client: PeersClient, - _cfg: &ProtocolConfig, - _: &PeerData, - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { PassThroughVerifier::new(false) // use non-instant finality. 
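		// (Editor's note: `PassThroughVerifier::new(false)` marks imported blocks as
		// not finalized, so these tests can drive finality notifications explicitly.)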
} diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index ac3b89f2ff9a2..ee8be727dcdac 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -566,7 +566,6 @@ mod tests { use sc_consensus::BoxJustificationImport; use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; - use sc_network::config::ProtocolConfig; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; use sp_consensus::{ @@ -645,6 +644,7 @@ mod tests { >; type AuraPeer = Peer<(), PeersClient>; + #[derive(Default)] pub struct AuraTestNet { peers: Vec, } @@ -654,17 +654,7 @@ mod tests { type PeerData = (); type BlockImport = PeersClient; - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - _peer_data: &(), - ) -> Self::Verifier { + fn make_verifier(&self, client: PeersClient, _peer_data: &()) -> Self::Verifier { let client = client.as_client(); let slot_duration = slot_duration(&*client).expect("slot duration available"); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index e0590fc0cd86e..c0a7a8c6c013a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -29,7 +29,6 @@ use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_keystore::LocalKeystore; -use sc_network::config::ProtocolConfig; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; @@ -220,6 +219,7 @@ where type BabePeer = Peer, BabeBlockImport>; +#[derive(Default)] pub struct BabeTestNet { peers: Vec, } @@ -278,12 +278,6 @@ impl TestNetFactory for BabeTestNet { type PeerData = Option; type BlockImport = BabeBlockImport; - /// Create new test network with peers and given config. 
- fn from_config(_config: &ProtocolConfig) -> Self { - debug!(target: "babe", "Creating test network from config"); - BabeTestNet { peers: Vec::new() } - } - fn make_block_import( &self, client: PeersClient, @@ -309,12 +303,7 @@ impl TestNetFactory for BabeTestNet { ) } - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - maybe_link: &Option, - ) -> Self::Verifier { + fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { use substrate_test_runtime_client::DefaultTestClientBuilderExt; let client = client.as_client(); diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 77cd847d48169..a5f20b9f3261d 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -36,6 +36,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 2d12232b04f15..623ac577c5579 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -28,7 +28,7 @@ use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, ImportedAux, }; -use sc_network::config::{ProtocolConfig, Role}; +use sc_network::config::Role; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestClient, TestNetFactory, @@ -73,6 +73,7 @@ type GrandpaBlockImport = crate::GrandpaBlockImport< LongestChain, >; +#[derive(Default)] struct GrandpaTestNet { peers: Vec, test_config: TestApi, @@ -110,16 +111,6 @@ impl TestNetFactory for GrandpaTestNet { type PeerData = PeerData; type BlockImport = GrandpaBlockImport; - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { peers: Vec::new(), test_config: Default::default() } - } - - fn default_config() -> ProtocolConfig { - // This is unused. - ProtocolConfig::default() - } - fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { notifications_protocols: vec![grandpa_protocol_name::NAME.into()], @@ -128,12 +119,7 @@ impl TestNetFactory for GrandpaTestNet { }) } - fn make_verifier( - &self, - _client: PeersClient, - _cfg: &ProtocolConfig, - _: &PeerData, - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { PassThroughVerifier::new(false) // use non-instant finality. 
} diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 90f6828a1105d..a31a0a8b91908 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -23,7 +23,7 @@ use crate::{ BlockNumberOps, GrandpaJustification, SharedAuthoritySet, }; use sc_client_api::Backend as ClientBackend; -use sc_network::warp_request_handler::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sc_network_common::sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_finality_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index dfd2db2e6b844..2742262b57e40 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -51,15 +51,12 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "./common" } -sc-network-light = { version = "0.10.0-dev", path = "./light" } -sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-arithmetic = { version = "5.0.0", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] @@ -67,6 +64,8 @@ assert_matches = "1.3" async-std = "1.11.0" rand = "0.7.2" tempfile = "3.1.0" +sc-network-light = { version = "0.10.0-dev", path = "./light" } +sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index e69787d7aff77..b0e3a8fe42a83 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,10 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" libp2p = "0.46.1" smallvec = "1.8.0" +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 81769e23debbb..9fbedc542c123 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -21,3 +21,4 @@ pub mod config; pub mod message; pub mod request_responses; +pub mod sync; diff --git 
a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs
new file mode 100644
index 0000000000000..2ee8f8c51814b
--- /dev/null
+++ b/client/network/common/src/sync.rs
@@ -0,0 +1,394 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Abstract interfaces and data structures related to network sync.
+
+pub mod message;
+pub mod metrics;
+pub mod warp;
+
+use libp2p::PeerId;
+use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse};
+use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
+use sp_consensus::BlockOrigin;
+use sp_runtime::{
+	traits::{Block as BlockT, NumberFor},
+	Justifications,
+};
+use std::{any::Any, fmt, fmt::Formatter, task::Poll};
+use warp::{EncodedProof, WarpProofRequest, WarpSyncProgress};
+
+/// The sync status of a peer we are trying to sync with
+#[derive(Debug)]
+pub struct PeerInfo<Block: BlockT> {
+	/// Their best block hash.
+	pub best_hash: Block::Hash,
+	/// Their best block number.
+	pub best_number: NumberFor<Block>,
+}
+
+/// Reported sync state.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub enum SyncState {
+	/// Initial sync is complete, keep-up sync is active.
+	Idle,
+	/// Actively catching up with the chain.
+	Downloading,
+}
+
+/// Reported state download progress.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct StateDownloadProgress {
+	/// Estimated download percentage.
+	pub percentage: u32,
+	/// Total state size in bytes downloaded so far.
+	pub size: u64,
+}
+
+/// Syncing status and statistics.
+#[derive(Clone)]
+pub struct SyncStatus<Block: BlockT> {
+	/// Current global sync state.
+	pub state: SyncState,
+	/// Target sync block number.
+	pub best_seen_block: Option<NumberFor<Block>>,
+	/// Number of peers participating in syncing.
+	pub num_peers: u32,
+	/// Number of blocks queued for import
+	pub queued_blocks: u32,
+	/// State sync status in progress, if any.
+	pub state_sync: Option<StateDownloadProgress>,
+	/// Warp sync in progress, if any.
+	pub warp_sync: Option<WarpSyncProgress<Block>>,
+}
+
+/// A peer did not behave as expected and should be reported.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange);
+
+impl fmt::Display for BadPeer {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1)
+	}
+}
+
+impl std::error::Error for BadPeer {}
+
+/// Result of [`ChainSync::on_block_data`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockData<Block: BlockT> {
+	/// The block should be imported.
+	Import(BlockOrigin, Vec<IncomingBlock<Block>>),
+	/// A new block request needs to be made to the given peer.
+	Request(PeerId, BlockRequest<Block>),
+}
+
+/// Result of [`ChainSync::on_block_justification`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockJustification<Block: BlockT> {
+	/// The justification needs no further handling.
+	Nothing,
+	/// The justification should be imported.
+	Import {
+		peer: PeerId,
+		hash: Block::Hash,
+		number: NumberFor<Block>,
+		justifications: Justifications,
+	},
+}
+
+/// Result of [`ChainSync::on_state_data`].
+#[derive(Debug)]
+pub enum OnStateData<Block: BlockT> {
+	/// The block and state that should be imported.
+	Import(BlockOrigin, IncomingBlock<Block>),
+	/// A new state request needs to be made to the given peer.
+	Continue,
+}
+
+/// Result of [`ChainSync::poll_block_announce_validation`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum PollBlockAnnounceValidation<H> {
+	/// The announcement failed at validation.
+	///
+	/// The peer reputation should be decreased.
+	Failure {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Should the peer be disconnected?
+		disconnect: bool,
+	},
+	/// The announcement does not require further handling.
+	Nothing {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Was this their new best block?
+		is_best: bool,
+		/// The announcement.
+		announce: BlockAnnounce<H>,
+	},
+	/// The announcement header should be imported.
+	ImportHeader {
+		/// Who sent the processed block announcement?
+		who: PeerId,
+		/// Was this their new best block?
+		is_best: bool,
+		/// The announcement.
+		announce: BlockAnnounce<H>,
+	},
+	/// The block announcement should be skipped.
+	Skip,
+}
+
+/// Operation mode.
+#[derive(Debug, PartialEq, Eq)]
+pub enum SyncMode {
+	// Sync headers only
+	Light,
+	// Sync headers and block bodies
+	Full,
+	// Sync headers and the last finalized state
+	LightState { storage_chain_mode: bool, skip_proofs: bool },
+	// Warp sync mode.
+	Warp,
+}
+
+#[derive(Debug)]
+pub struct Metrics {
+	pub queued_blocks: u32,
+	pub fork_targets: u32,
+	pub justifications: metrics::Metrics,
+}
+
+/// Wrapper for implementation-specific state request.
+///
+/// NOTE: Implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueStateRequest(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueStateRequest {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueStateRequest").finish()
+	}
+}
+
+/// Wrapper for implementation-specific state response.
+///
+/// NOTE: Implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueStateResponse(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueStateResponse {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueStateResponse").finish()
+	}
+}
+
+/// Wrapper for implementation-specific block request.
+///
+/// NOTE: Implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueBlockRequest(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueBlockRequest {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueBlockRequest").finish()
+	}
+}
+
+/// Wrapper for implementation-specific block response.
+///
+/// NOTE: Implementation must be able to encode and decode it for network purposes.
+pub struct OpaqueBlockResponse(pub Box<dyn Any + Send>);
+
+impl fmt::Debug for OpaqueBlockResponse {
+	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+		f.debug_struct("OpaqueBlockResponse").finish()
+	}
+}
+
+/// Something that represents the syncing strategy to download past and future blocks of the chain.
+pub trait ChainSync<Block: BlockT>: Send {
+	/// Returns the state of the sync of the given peer.
+	///
+	/// Returns `None` if the peer is unknown.
+	fn peer_info(&self, who: &PeerId) -> Option<PeerInfo<Block>>;
+
+	/// Returns the current sync status.
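+	///
+	/// Editor's sketch (not part of the original patch): a caller holding a boxed
+	/// `chain_sync: Box<dyn ChainSync<Block>>` could surface this as, for instance:
+	///
+	/// ```ignore
+	/// let status = chain_sync.status();
+	/// if status.state == SyncState::Downloading {
+	///     log::info!("syncing from {} peers, {} blocks queued", status.num_peers, status.queued_blocks);
+	/// }
+	/// ```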
+	fn status(&self) -> SyncStatus<Block>;
+
+	/// Number of active fork requests. This includes
+	/// requests that are pending or could be issued right away.
+	fn num_sync_requests(&self) -> usize;
+
+	/// Number of downloaded blocks.
+	fn num_downloaded_blocks(&self) -> usize;
+
+	/// Returns the current number of peers stored within this state machine.
+	fn num_peers(&self) -> usize;
+
+	/// Handle a new connected peer.
+	///
+	/// Call this method whenever we connect to a new peer.
+	fn new_peer(
+		&mut self,
+		who: PeerId,
+		best_hash: Block::Hash,
+		best_number: NumberFor<Block>,
+	) -> Result<Option<BlockRequest<Block>>, BadPeer>;
+
+	/// Signal that a new best block has been imported.
+	fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor<Block>);
+
+	/// Schedule a justification request for the given block.
+	fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor<Block>);
+
+	/// Clear all pending justification requests.
+	fn clear_justification_requests(&mut self);
+
+	/// Request syncing for the given block from given set of peers.
+	fn set_sync_fork_request(
+		&mut self,
+		peers: Vec<PeerId>,
+		hash: &Block::Hash,
+		number: NumberFor<Block>,
+	);
+
+	/// Get an iterator over all scheduled justification requests.
+	fn justification_requests(
+		&mut self,
+	) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<Block>)> + '_>;
+
+	/// Get an iterator over all block requests of all peers.
+	fn block_requests(&mut self) -> Box<dyn Iterator<Item = (&PeerId, BlockRequest<Block>)> + '_>;
+
+	/// Get a state request, if any.
+	fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>;
+
+	/// Get a warp sync request, if any.
+	fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest<Block>)>;
+
+	/// Handle a response from the remote to a block request that we made.
+	///
+	/// `request` must be the original request that triggered `response`,
+	/// or `None` if data comes from the block announcement.
+	///
+	/// If this corresponds to a valid block, this outputs the block that
+	/// must be imported in the import queue.
+	fn on_block_data(
+		&mut self,
+		who: &PeerId,
+		request: Option<BlockRequest<Block>>,
+		response: BlockResponse<Block>,
+	) -> Result<OnBlockData<Block>, BadPeer>;
+
+	/// Handle a response from the remote to a state request that we made.
+	fn on_state_data(
+		&mut self,
+		who: &PeerId,
+		response: OpaqueStateResponse,
+	) -> Result<OnStateData<Block>, BadPeer>;
+
+	/// Handle a response from the remote to a warp proof request that we made.
+	fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer>;
+
+	/// Handle a response from the remote to a justification request that we made.
+	///
+	/// `request` must be the original request that triggered `response`.
+	fn on_block_justification(
+		&mut self,
+		who: PeerId,
+		response: BlockResponse<Block>,
+	) -> Result<OnBlockJustification<Block>, BadPeer>;
+
+	/// A batch of blocks have been processed, with or without errors.
+	///
+	/// Call this when a batch of blocks have been processed by the import
+	/// queue, with or without errors.
+	fn on_blocks_processed(
+		&mut self,
+		imported: usize,
+		count: usize,
+		results: Vec<(Result<BlockImportStatus<NumberFor<Block>>, BlockImportError>, Block::Hash)>,
+	) -> Box<dyn Iterator<Item = Result<(PeerId, BlockRequest<Block>), BadPeer>>>;
+
+	/// Call this when a justification has been processed by the import queue,
+	/// with or without errors.
+	fn on_justification_import(
+		&mut self,
+		hash: Block::Hash,
+		number: NumberFor<Block>,
+		success: bool,
+	);
+
+	/// Notify about finalization of the given block.
+	fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor<Block>);
+
+	/// Push a block announce validation.
+	///
+	/// It is required that [`ChainSync::poll_block_announce_validation`] is called
+	/// to check for finished block announce validations.
+	fn push_block_announce_validation(
+		&mut self,
+		who: PeerId,
+		hash: Block::Hash,
+		announce: BlockAnnounce<Block::Header>,
+		is_best: bool,
+	);
+
+	/// Poll block announce validation.
+	///
+	/// Block announce validations can be pushed by using
+	/// [`ChainSync::push_block_announce_validation`].
+	///
+	/// This should be polled until it returns [`Poll::Pending`].
+	///
+	/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to
+	/// import passed header (call `on_block_data`). The network request isn't sent in this case.
+	fn poll_block_announce_validation(
+		&mut self,
+		cx: &mut std::task::Context,
+	) -> Poll<PollBlockAnnounceValidation<Block::Header>>;
+
+	/// Call when a peer has disconnected.
+	/// Canceled obsolete block requests may result in some blocks being ready for
+	/// import, so this function checks for such blocks and returns them.
+	fn peer_disconnected(&mut self, who: &PeerId) -> Option<OnBlockData<Block>>;
+
+	/// Return some key metrics.
+	fn metrics(&self) -> Metrics;
+
+	/// Create implementation-specific block request.
+	fn create_opaque_block_request(&self, request: &BlockRequest<Block>) -> OpaqueBlockRequest;
+
+	/// Encode implementation-specific block request.
+	fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result<Vec<u8>, String>;
+
+	/// Decode implementation-specific block response.
+	fn decode_block_response(&self, response: &[u8]) -> Result<OpaqueBlockResponse, String>;
+
+	/// Access blocks from implementation-specific block response.
+	fn block_response_into_blocks(
+		&self,
+		request: &BlockRequest<Block>,
+		response: OpaqueBlockResponse,
+	) -> Result<Vec<BlockData<Block>>, String>;
+
+	/// Encode implementation-specific state request.
+	fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result<Vec<u8>, String>;
+
+	/// Decode implementation-specific state response.
+	fn decode_state_response(&self, response: &[u8]) -> Result<OpaqueStateResponse, String>;
+}
diff --git a/client/network/sync/src/message.rs b/client/network/common/src/sync/message.rs
similarity index 99%
rename from client/network/sync/src/message.rs
rename to client/network/common/src/sync/message.rs
index 996ee5231cf2e..27ab2704e6471 100644
--- a/client/network/sync/src/message.rs
+++ b/client/network/common/src/sync/message.rs
@@ -124,8 +124,8 @@ impl BlockAnnounce {
 /// Generic types.
 pub mod generic {
 	use super::{BlockAttributes, BlockState, Direction};
+	use crate::message::RequestId;
 	use codec::{Decode, Encode, Input, Output};
-	use sc_network_common::message::RequestId;
 	use sp_runtime::{EncodedJustification, Justifications};
 
 	/// Block data sent in the response.
diff --git a/client/network/common/src/sync/metrics.rs b/client/network/common/src/sync/metrics.rs
new file mode 100644
index 0000000000000..15ff090a8ccac
--- /dev/null
+++ b/client/network/common/src/sync/metrics.rs
@@ -0,0 +1,25 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+#[derive(Debug)]
+pub struct Metrics {
+	pub pending_requests: u32,
+	pub active_requests: u32,
+	pub importing_requests: u32,
+	pub failed_requests: u32,
+}
diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs
new file mode 100644
index 0000000000000..339a4c33a7eeb
--- /dev/null
+++ b/client/network/common/src/sync/warp.rs
@@ -0,0 +1,94 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <https://www.gnu.org/licenses/>.
+
+use codec::{Decode, Encode};
+pub use sp_finality_grandpa::{AuthorityList, SetId};
+use sp_runtime::traits::{Block as BlockT, NumberFor};
+use std::fmt;
+
+/// Scale-encoded warp sync proof response.
+pub struct EncodedProof(pub Vec<u8>);
+
+/// Warp sync request
+#[derive(Encode, Decode, Debug)]
+pub struct WarpProofRequest<B: BlockT> {
+	/// Start collecting proofs from this block.
+	pub begin: B::Hash,
+}
+
+/// Proof verification result.
+pub enum VerificationResult<Block: BlockT> {
+	/// Proof is valid, but the target was not reached.
+	Partial(SetId, AuthorityList, Block::Hash),
+	/// Target finality is proved.
+	Complete(SetId, AuthorityList, Block::Header),
+}
+
+/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
+pub trait WarpSyncProvider<Block: BlockT>: Send + Sync {
+	/// Generate proof starting at given block hash. The proof is accumulated until maximum proof
+	/// size is reached.
+	fn generate(
+		&self,
+		start: Block::Hash,
+	) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>;
+	/// Verify warp proof against current set of authorities.
+	fn verify(
+		&self,
+		proof: &EncodedProof,
+		set_id: SetId,
+		authorities: AuthorityList,
+	) -> Result<VerificationResult<Block>, Box<dyn std::error::Error + Send + Sync>>;
+	/// Get current list of authorities. This is supposed to be genesis authorities when starting
+	/// sync.
+	fn current_authorities(&self) -> AuthorityList;
+}
+
+/// Reported warp sync phase.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub enum WarpSyncPhase<Block: BlockT> {
+	/// Waiting for peers to connect.
+	AwaitingPeers,
+	/// Downloading and verifying grandpa warp proofs.
+	DownloadingWarpProofs,
+	/// Downloading state data.
+	DownloadingState,
+	/// Importing state.
+	ImportingState,
+	/// Downloading block history.
+	DownloadingBlocks(NumberFor<Block>),
+}
+
+impl<Block: BlockT> fmt::Display for WarpSyncPhase<Block> {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match self {
+			Self::AwaitingPeers => write!(f, "Waiting for peers"),
+			Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"),
+			Self::DownloadingState => write!(f, "Downloading state"),
+			Self::ImportingState => write!(f, "Importing state"),
+			Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n),
+		}
+	}
+}
+
+/// Reported warp sync progress.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct WarpSyncProgress<Block: BlockT> {
+	/// Reported warp sync phase.
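+	/// (Editor's note: rendered for humans via the `Display` impl on `WarpSyncPhase`
+	/// above, e.g. "Downloading state".)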
+ pub phase: WarpSyncPhase, + /// Total bytes downloaded so far. + pub total_bytes: u64, +} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 091dd116e4c9c..515608df13d0f 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -38,7 +38,7 @@ use libp2p::{ NetworkBehaviour, }; use log::debug; -use prost::Message; + use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::import_queue::{IncomingBlock, Origin}; use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; @@ -382,42 +382,44 @@ where .events .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), CustomMessageOutcome::BlockRequest { target, request, pending_response } => { - let mut buf = Vec::with_capacity(request.encoded_len()); - if let Err(err) = request.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - request, err - ); - return + match self.substrate.encode_block_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.block_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + }, } - - self.request_responses.send_request( - &target, - &self.block_request_protocol_name, - buf, - pending_response, - IfDisconnected::ImmediateError, - ); }, CustomMessageOutcome::StateRequest { target, request, pending_response } => { - let mut buf = Vec::with_capacity(request.encoded_len()); - if let Err(err) = request.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode state request {:?}: {:?}", - request, err - ); - return + match self.substrate.encode_state_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.state_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + }, } - - self.request_responses.send_request( - &target, - &self.state_request_protocol_name, - buf, - pending_response, - IfDisconnected::ImmediateError, - ); }, CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => match &self.warp_sync_protocol_name { diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs index d5039faaca113..2dab45adc5618 100644 --- a/client/network/src/bitswap.rs +++ b/client/network/src/bitswap.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. +// Copyright 2022 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
// Substrate is free software: you can redistribute it and/or modify @@ -118,8 +118,7 @@ where fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { Box::pin(async move { - let mut data = Vec::with_capacity(self.encoded_len()); - self.encode(&mut data)?; + let data = self.encode_to_vec(); upgrade::write_length_prefixed(&mut socket, data).await }) } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index e44977e5be6b3..430efd697a18c 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -26,16 +26,11 @@ pub use sc_network_common::{ request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + sync::warp::WarpSyncProvider, }; -pub use sc_network_sync::warp_request_handler::WarpSyncProvider; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -// Note: this re-export shouldn't be part of the public API of the crate and will be removed in -// the future. -#[doc(hidden)] -pub use crate::protocol::ProtocolConfig; - use crate::ExHashT; use core::{fmt, iter}; @@ -46,7 +41,7 @@ use libp2p::{ }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sp_consensus::block_validation::BlockAnnounceValidator; +use sc_network_common::sync::ChainSync; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, @@ -101,8 +96,14 @@ where /// valid. pub import_queue: Box>, - /// Type to check incoming block announcements. - pub block_announce_validator: Box + Send>, + /// Factory function that creates a new instance of chain sync. + pub create_chain_sync: Box< + dyn FnOnce( + sc_network_common::sync::SyncMode, + Arc, + Option>>, + ) -> crate::error::Result>>, + >, /// Registry for recording prometheus metrics to. pub metrics_registry: Option, @@ -114,26 +115,26 @@ where /// block requests, if enabled. /// /// Can be constructed either via - /// [`sc_network_sync::block_request_handler::generate_protocol_config`] allowing outgoing but - /// not incoming requests, or constructed via [`sc_network_sync::block_request_handler:: - /// BlockRequestHandler::new`] allowing both outgoing and incoming requests. + /// `sc_network_sync::block_request_handler::generate_protocol_config` allowing outgoing but + /// not incoming requests, or constructed via `sc_network_sync::block_request_handler:: + /// BlockRequestHandler::new` allowing both outgoing and incoming requests. pub block_request_protocol_config: RequestResponseConfig, /// Request response configuration for the light client request protocol. /// /// Can be constructed either via - /// [`sc_network_light::light_client_requests::generate_protocol_config`] allowing outgoing but + /// `sc_network_light::light_client_requests::generate_protocol_config` allowing outgoing but /// not incoming requests, or constructed via - /// [`sc_network_light::light_client_requests::handler::LightClientRequestHandler::new`] + /// `sc_network_light::light_client_requests::handler::LightClientRequestHandler::new` /// allowing both outgoing and incoming requests. pub light_client_request_protocol_config: RequestResponseConfig, /// Request response configuration for the state request protocol. 
/// /// Can be constructed either via - /// [`sc_network_sync::block_request_handler::generate_protocol_config`] allowing outgoing but + /// `sc_network_sync::state_request_handler::generate_protocol_config` allowing outgoing but /// not incoming requests, or constructed via - /// [`crate::state_request_handler::StateRequestHandler::new`] allowing + /// `sc_network_sync::state_request_handler::StateRequestHandler::new` allowing /// both outgoing and incoming requests. pub state_request_protocol_config: RequestResponseConfig, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index fff30550eb8c9..83bc1075b8bad 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -266,13 +266,9 @@ pub use protocol::{ event::{DhtEvent, Event, ObservedRole}, PeerInfo, }; -pub use sc_network_light::light_client_requests; -pub use sc_network_sync::{ - block_request_handler, - state::StateDownloadProgress, - state_request_handler, +pub use sc_network_common::sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - warp_request_handler, SyncState, + StateDownloadProgress, SyncState, }; pub use service::{ DecodingError, IfDisconnected, KademliaKey, Keypair, NetworkService, NetworkWorker, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 348d2d0bf8877..3698a6b936ed5 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -20,7 +20,6 @@ use crate::{ config, error, request_responses::RequestFailure, utils::{interval, LruHashSet}, - warp_request_handler::{EncodedProof, WarpSyncProvider}, }; use bytes::Bytes; @@ -42,21 +41,23 @@ use message::{ }; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; -use prost::Message as _; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; -use sc_network_common::config::ProtocolId; -use sc_network_sync::{ - message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, - FromBlock, +use sc_network_common::{ + config::ProtocolId, + sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, + }, + warp::{EncodedProof, WarpProofRequest}, + BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, + OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation, + SyncStatus, }, - schema::v1::StateResponse, - BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, - PollBlockAnnounceValidation, Status as SyncStatus, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_blockchain::HeaderMetadata; +use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, @@ -79,7 +80,6 @@ pub mod event; pub mod message; pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; -use sp_blockchain::HeaderMetadata; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); @@ -167,11 +167,12 @@ pub struct Protocol { tick_timeout: Pin + Send>>, /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, - config: ProtocolConfig, + /// Assigned roles. 
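+	/// (Editor's note: this field replaces the removed `ProtocolConfig::roles`; the other
+	/// `ProtocolConfig` fields now feed into chain-sync construction instead, as the hunks
+	/// below show.)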
+ roles: Roles, genesis_hash: B::Hash, /// State machine that handles the list of in-progress requests. Only full node peers are /// registered. - sync: ChainSync, + chain_sync: Box>, // All connected peers. Contains both full and light node peers. peers: HashMap>, chain: Arc, @@ -231,38 +232,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -/// Configuration for the Substrate-specific part of the networking layer. -#[derive(Clone)] -pub struct ProtocolConfig { - /// Assigned roles. - pub roles: Roles, - /// Maximum number of peers to ask the same blocks in parallel. - pub max_parallel_downloads: u32, - /// Enable state sync. - pub sync_mode: config::SyncMode, -} - -impl ProtocolConfig { - fn sync_mode(&self) -> sc_network_sync::SyncMode { - if self.roles.is_light() { - sc_network_sync::SyncMode::Light - } else { - match self.sync_mode { - config::SyncMode::Full => sc_network_sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, - config::SyncMode::Warp => sc_network_sync::SyncMode::Warp, - } - } - } -} - -impl Default for ProtocolConfig { - fn default() -> ProtocolConfig { - Self { roles: Roles::FULL, max_parallel_downloads: 5, sync_mode: config::SyncMode::Full } - } -} - /// Handshake sent when we open a block announces substream. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] struct BlockAnnouncesHandshake { @@ -278,12 +247,12 @@ struct BlockAnnouncesHandshake { impl BlockAnnouncesHandshake { fn build( - protocol_config: &ProtocolConfig, + roles: Roles, best_number: NumberFor, best_hash: B::Hash, genesis_hash: B::Hash, ) -> Self { - Self { genesis_hash, roles: protocol_config.roles, best_number, best_hash } + Self { genesis_hash, roles, best_number, best_hash } } } @@ -300,24 +269,15 @@ where { /// Create a new instance. pub fn new( - config: ProtocolConfig, + roles: Roles, chain: Arc, protocol_id: ProtocolId, network_config: &config::NetworkConfiguration, notifications_protocols_handshakes: Vec>, - block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, - warp_sync_provider: Option>>, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { + chain_sync: Box>, + ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); - let sync = ChainSync::new( - config.sync_mode(), - chain.clone(), - block_announce_validator, - config.max_parallel_downloads, - warp_sync_provider, - ) - .map_err(Box::new)?; let boot_node_ids = { let mut list = HashSet::new(); @@ -405,7 +365,7 @@ where let genesis_hash = info.genesis_hash; let block_announces_handshake = - BlockAnnouncesHandshake::::build(&config, best_number, best_hash, genesis_hash) + BlockAnnouncesHandshake::::build(roles, best_number, best_hash, genesis_hash) .encode(); let sync_protocol_config = notifications::ProtocolConfig { @@ -438,11 +398,11 @@ where let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), pending_messages: VecDeque::new(), - config, + roles, peers: HashMap::new(), chain, genesis_hash: info.genesis_hash, - sync, + chain_sync, important_peers, default_peers_set_num_full: network_config.default_peers_set_num_full as usize, default_peers_set_num_light: { @@ -510,49 +470,49 @@ where /// Current global sync state. pub fn sync_state(&self) -> SyncStatus { - self.sync.status() + self.chain_sync.status() } /// Target sync block number. 
pub fn best_seen_block(&self) -> Option> { - self.sync.status().best_seen_block + self.chain_sync.status().best_seen_block } /// Number of peers participating in syncing. pub fn num_sync_peers(&self) -> u32 { - self.sync.status().num_peers + self.chain_sync.status().num_peers } /// Number of blocks in the import queue. pub fn num_queued_blocks(&self) -> u32 { - self.sync.status().queued_blocks + self.chain_sync.status().queued_blocks } /// Number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - self.sync.num_downloaded_blocks() + self.chain_sync.num_downloaded_blocks() } /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { - self.sync.num_sync_requests() + self.chain_sync.num_sync_requests() } /// Inform sync about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); - self.sync.update_chain_info(&hash, number); + self.chain_sync.update_chain_info(&hash, number); self.behaviour.set_notif_protocol_handshake( HARDCODED_PEERSETS_SYNC, - BlockAnnouncesHandshake::::build(&self.config, number, hash, self.genesis_hash) + BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) .encode(), ); } fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.sync.peer_info(who) { + if let Some(info) = self.chain_sync.peer_info(who) { if let Some(ref mut peer) = self.peers.get_mut(who) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; @@ -565,14 +525,6 @@ where self.peers.iter().map(|(id, peer)| (id, &peer.info)) } - fn prepare_block_request( - &mut self, - who: PeerId, - request: BlockRequest, - ) -> CustomMessageOutcome { - prepare_block_request::(&mut self.peers, who, request) - } - /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. @@ -584,7 +536,9 @@ where } if let Some(_peer_data) = self.peers.remove(&peer) { - if let Some(OnBlockData::Import(origin, blocks)) = self.sync.peer_disconnected(&peer) { + if let Some(OnBlockData::Import(origin, blocks)) = + self.chain_sync.peer_disconnected(&peer) + { self.pending_messages .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); } @@ -605,62 +559,9 @@ where &mut self, peer_id: PeerId, request: BlockRequest, - response: sc_network_sync::schema::v1::BlockResponse, + response: OpaqueBlockResponse, ) -> CustomMessageOutcome { - let blocks = response - .blocks - .into_iter() - .map(|block_data| { - Ok(BlockData:: { - hash: Decode::decode(&mut block_data.hash.as_ref())?, - header: if !block_data.header.is_empty() { - Some(Decode::decode(&mut block_data.header.as_ref())?) 
- } else { - None - }, - body: if request.fields.contains(BlockAttributes::BODY) { - Some( - block_data - .body - .iter() - .map(|body| Decode::decode(&mut body.as_ref())) - .collect::, _>>()?, - ) - } else { - None - }, - indexed_body: if request.fields.contains(BlockAttributes::INDEXED_BODY) { - Some(block_data.indexed_body) - } else { - None - }, - receipt: if !block_data.receipt.is_empty() { - Some(block_data.receipt) - } else { - None - }, - message_queue: if !block_data.message_queue.is_empty() { - Some(block_data.message_queue) - } else { - None - }, - justification: if !block_data.justification.is_empty() { - Some(block_data.justification) - } else if block_data.is_empty_justification { - Some(Vec::new()) - } else { - None - }, - justifications: if !block_data.justifications.is_empty() { - Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) - } else { - None - }, - }) - }) - .collect::, codec::Error>>(); - - let blocks = match blocks { + let blocks = match self.chain_sync.block_response_into_blocks(&request, response) { Ok(blocks) => blocks, Err(err) => { debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); @@ -690,7 +591,7 @@ where ); if request.fields == BlockAttributes::JUSTIFICATION { - match self.sync.on_block_justification(peer_id, block_response) { + match self.chain_sync.on_block_justification(peer_id, block_response) { Ok(OnBlockJustification::Nothing) => CustomMessageOutcome::None, Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => CustomMessageOutcome::JustificationImport(peer, hash, number, justifications), @@ -701,10 +602,11 @@ where }, } } else { - match self.sync.on_block_data(&peer_id, Some(request), block_response) { + match self.chain_sync.on_block_data(&peer_id, Some(request), block_response) { Ok(OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), + Ok(OnBlockData::Request(peer, req)) => + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -719,9 +621,9 @@ where pub fn on_state_response( &mut self, peer_id: PeerId, - response: StateResponse, + response: OpaqueStateResponse, ) -> CustomMessageOutcome { - match self.sync.on_state_data(&peer_id, response) { + match self.chain_sync.on_state_data(&peer_id, response) { Ok(OnStateData::Import(origin, block)) => CustomMessageOutcome::BlockImport(origin, vec![block]), Ok(OnStateData::Continue) => CustomMessageOutcome::None, @@ -738,9 +640,9 @@ where pub fn on_warp_sync_response( &mut self, peer_id: PeerId, - response: crate::warp_request_handler::EncodedProof, + response: EncodedProof, ) -> CustomMessageOutcome { - match self.sync.on_warp_sync_data(&peer_id, response) { + match self.chain_sync.on_warp_sync_data(&peer_id, response) { Ok(()) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -798,7 +700,7 @@ where return Err(()) } - if self.config.roles.is_light() { + if self.roles.is_light() { // we're not interested in light peers if status.roles.is_light() { debug!(target: "sync", "Peer {} is unable to serve light requests", who); @@ -821,14 +723,15 @@ where } } - if status.roles.is_full() && self.sync.num_peers() >= self.default_peers_set_num_full { + if status.roles.is_full() && 
self.chain_sync.num_peers() >= self.default_peers_set_num_full + { debug!(target: "sync", "Too many full nodes, rejecting {}", who); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); return Err(()) } if status.roles.is_light() && - (self.peers.len() - self.sync.num_peers()) >= self.default_peers_set_num_light + (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. debug!(target: "sync", "Too many light nodes, rejecting {}", who); @@ -849,7 +752,7 @@ where }; let req = if peer.info.roles.is_full() { - match self.sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { Ok(req) => req, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -868,8 +771,12 @@ where .push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number)); if let Some(req) = req { - let event = self.prepare_block_request(who, req); - self.pending_messages.push_back(event); + self.pending_messages.push_back(prepare_block_request( + self.chain_sync.as_ref(), + &mut self.peers, + who, + req, + )); } Ok(()) @@ -953,7 +860,7 @@ where }; if peer.info.roles.is_full() { - self.sync.push_block_announce_validation(who, hash, announce, is_best); + self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); } } @@ -1010,7 +917,7 @@ where // to import header from announced block let's construct response to request that normally // would have been sent over network (but it is not in our case) - let blocks_to_import = self.sync.on_block_data( + let blocks_to_import = self.chain_sync.on_block_data( &who, None, BlockResponse:: { @@ -1035,7 +942,8 @@ where match blocks_to_import { Ok(OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), + Ok(OnBlockData::Request(peer, req)) => + prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -1047,7 +955,7 @@ where /// Call this when a block has been finalized. The sync layer may have some additional /// requesting to perform. pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.sync.on_block_finalized(&hash, *header.number()) + self.chain_sync.on_block_finalized(&hash, *header.number()) } /// Request a justification for the given block. @@ -1055,12 +963,12 @@ where /// Uses `protocol` to queue a new justification request and tries to dispatch all pending /// requests. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_justification(hash, number) + self.chain_sync.request_justification(hash, number) } /// Clear all pending justification requests. pub fn clear_justification_requests(&mut self) { - self.sync.clear_justification_requests(); + self.chain_sync.clear_justification_requests(); } /// Request syncing for the given block from given set of peers. @@ -1072,7 +980,7 @@ where hash: &B::Hash, number: NumberFor, ) { - self.sync.set_sync_fork_request(peers, hash, number) + self.chain_sync.set_sync_fork_request(peers, hash, number) } /// A batch of blocks have been processed, with or without errors. 
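// Aside: the full/light slot checks above reduce to two counters. A
// self-contained sketch of the same accounting (`SlotCheck` and its field
// names are illustrative, not part of this patch): `ChainSync` only tracks
// full peers, so the light-peer count is the difference between all connected
// peers and `chain_sync.num_peers()`.
struct SlotCheck {
	/// Peers known to the sync state machine (full peers only).
	full_peers: usize,
	/// All connected peers, full and light.
	all_peers: usize,
	max_full: usize,
	max_light: usize,
}

impl SlotCheck {
	fn accept(&self, candidate_is_full: bool) -> bool {
		if candidate_is_full {
			self.full_peers < self.max_full
		} else {
			// Mirrors "make sure that not all slots are occupied by light
			// clients" above.
			self.all_peers - self.full_peers < self.max_light
		}
	}
}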
@@ -1084,11 +992,12 @@ where count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - let results = self.sync.on_blocks_processed(imported, count, results); + let results = self.chain_sync.on_blocks_processed(imported, count, results); for result in results { match result { Ok((id, req)) => { self.pending_messages.push_back(prepare_block_request( + self.chain_sync.as_ref(), &mut self.peers, id, req, @@ -1111,7 +1020,7 @@ where number: NumberFor, success: bool, ) { - self.sync.on_justification_import(hash, number, success); + self.chain_sync.on_justification_import(hash, number, success); if !success { info!("💔 Invalid justification provided by {} for #{}", who, hash); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); @@ -1228,12 +1137,22 @@ where } } + /// Encode implementation-specific block request. + pub fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String> { + self.chain_sync.encode_block_request(request) + } + + /// Encode implementation-specific state request. + pub fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String> { + self.chain_sync.encode_state_request(request) + } + fn report_metrics(&self) { if let Some(metrics) = &self.metrics { let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); metrics.peers.set(n); - let m = self.sync.metrics(); + let m = self.chain_sync.metrics(); metrics.fork_targets.set(m.fork_targets.into()); metrics.queued_blocks.set(m.queued_blocks.into()); @@ -1259,6 +1178,7 @@ where } fn prepare_block_request( + chain_sync: &dyn ChainSync, peers: &mut HashMap>, who: PeerId, request: BlockRequest, @@ -1269,19 +1189,7 @@ fn prepare_block_request( peer.request = Some((PeerRequest::Block(request.clone()), rx)); } - let request = sc_network_sync::schema::v1::BlockRequest { - fields: request.fields.to_be_u32(), - from_block: match request.from { - FromBlock::Hash(h) => - Some(sc_network_sync::schema::v1::block_request::FromBlock::Hash(h.encode())), - FromBlock::Number(n) => - Some(sc_network_sync::schema::v1::block_request::FromBlock::Number(n.encode())), - }, - to_block: request.to.map(|h| h.encode()).unwrap_or_default(), - direction: request.direction as i32, - max_blocks: request.max.unwrap_or(0), - support_multiple_justifications: true, - }; + let request = chain_sync.create_opaque_block_request(&request); CustomMessageOutcome::BlockRequest { target: who, request, pending_response: tx } } @@ -1289,7 +1197,7 @@ fn prepare_block_request( fn prepare_state_request( peers: &mut HashMap>, who: PeerId, - request: sc_network_sync::schema::v1::StateRequest, + request: OpaqueStateRequest, ) -> CustomMessageOutcome { let (tx, rx) = oneshot::channel(); @@ -1302,7 +1210,7 @@ fn prepare_state_request( fn prepare_warp_sync_request( peers: &mut HashMap>, who: PeerId, - request: crate::warp_request_handler::Request, + request: WarpProofRequest, ) -> CustomMessageOutcome { let (tx, rx) = oneshot::channel(); @@ -1346,19 +1254,19 @@ pub enum CustomMessageOutcome { /// A new block request must be emitted. BlockRequest { target: PeerId, - request: sc_network_sync::schema::v1::BlockRequest, + request: OpaqueBlockRequest, pending_response: oneshot::Sender, RequestFailure>>, }, /// A new storage request must be emitted. StateRequest { target: PeerId, - request: sc_network_sync::schema::v1::StateRequest, + request: OpaqueStateRequest, pending_response: oneshot::Sender, RequestFailure>>, }, /// A new warp sync request must be emitted. 
	WarpSyncRequest {
 		target: PeerId,
-		request: crate::warp_request_handler::Request<B>,
+		request: WarpProofRequest<B>,
 		pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
 	},
 	/// Peer has reported a new head of chain.
@@ -1455,10 +1363,8 @@ where
 						let (req, _) = peer.request.take().unwrap();
 						match req {
 							PeerRequest::Block(req) => {
-								let protobuf_response =
-									match sc_network_sync::schema::v1::BlockResponse::decode(
-										&resp[..],
-									) {
+								let response =
+									match self.chain_sync.decode_block_response(&resp[..]) {
 										Ok(proto) => proto,
 										Err(e) => {
 											debug!(
@@ -1474,13 +1380,11 @@ where
 										},
 									};

-								finished_block_requests.push((*id, req, protobuf_response));
+								finished_block_requests.push((*id, req, response));
 							},
 							PeerRequest::State => {
-								let protobuf_response =
-									match sc_network_sync::schema::v1::StateResponse::decode(
-										&resp[..],
-									) {
+								let response =
+									match self.chain_sync.decode_state_response(&resp[..]) {
 										Ok(proto) => proto,
 										Err(e) => {
 											debug!(
@@ -1496,7 +1400,7 @@ where
 										},
 									};

-								finished_state_requests.push((*id, protobuf_response));
+								finished_state_requests.push((*id, response));
 							},
 							PeerRequest::WarpProof => {
 								finished_warp_sync_requests.push((*id, resp));
@@ -1555,12 +1459,12 @@ where
 				}
 			}
 		}
-		for (id, req, protobuf_response) in finished_block_requests {
-			let ev = self.on_block_response(id, req, protobuf_response);
+		for (id, req, response) in finished_block_requests {
+			let ev = self.on_block_response(id, req, response);
 			self.pending_messages.push_back(ev);
 		}
-		for (id, protobuf_response) in finished_state_requests {
-			let ev = self.on_state_response(id, protobuf_response);
+		for (id, response) in finished_state_requests {
+			let ev = self.on_state_response(id, response);
 			self.pending_messages.push_back(ev);
 		}
 		for (id, response) in finished_warp_sync_requests {
@@ -1572,25 +1476,32 @@ where
 			self.tick();
 		}

-		for (id, request) in self.sync.block_requests() {
-			let event = prepare_block_request(&mut self.peers, *id, request);
+		for (id, request) in self
+			.chain_sync
+			.block_requests()
+			.map(|(peer_id, request)| (*peer_id, request))
+			.collect::<Vec<_>>()
+		{
+			let event =
+				prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, id, request);
 			self.pending_messages.push_back(event);
 		}
-		if let Some((id, request)) = self.sync.state_request() {
+		if let Some((id, request)) = self.chain_sync.state_request() {
 			let event = prepare_state_request(&mut self.peers, id, request);
 			self.pending_messages.push_back(event);
 		}
-		for (id, request) in self.sync.justification_requests() {
-			let event = prepare_block_request(&mut self.peers, id, request);
+		for (id, request) in self.chain_sync.justification_requests().collect::<Vec<_>>() {
+			let event =
+				prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, id, request);
 			self.pending_messages.push_back(event);
 		}
-		if let Some((id, request)) = self.sync.warp_sync_request() {
+		if let Some((id, request)) = self.chain_sync.warp_sync_request() {
 			let event = prepare_warp_sync_request(&mut self.peers, id, request);
 			self.pending_messages.push_back(event);
 		}

 		// Check if there is any block announcement validation finished.
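// Aside: the `.collect::<Vec<_>>()` calls above are load-bearing.
// `block_requests()` and `justification_requests()` now return boxed iterators
// holding a `&mut` borrow of `chain_sync`, while `prepare_block_request` needs
// `chain_sync` again as `&dyn ChainSync` plus `&mut self.peers`; collecting
// first consumes the iterator and ends the borrow. The same borrow pattern in
// miniature (all names illustrative):
struct Holder {
	items: Vec<u32>,
	seen: Vec<u32>,
}

impl Holder {
	fn drain_iter(&mut self) -> impl Iterator<Item = u32> + '_ {
		self.items.drain(..)
	}

	fn use_twice(&mut self) {
		// Collecting drops the iterator and with it the `&mut self` borrow,
		// so `self` can be borrowed again inside the loop body.
		for item in self.drain_iter().collect::<Vec<_>>() {
			self.seen.push(item);
		}
	}
}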
- while let Poll::Ready(result) = self.sync.poll_block_announce_validation(cx) { + while let Poll::Ready(result) = self.chain_sync.poll_block_announce_validation(cx) { match self.process_block_announce_validation_result(result) { CustomMessageOutcome::None => {}, outcome => self.pending_messages.push_back(outcome), @@ -1771,7 +1682,8 @@ where // Make sure that the newly added block announce validation future was // polled once to be registered in the task. - if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { + if let Poll::Ready(res) = self.chain_sync.poll_block_announce_validation(cx) + { self.process_block_announce_validation_result(res) } else { CustomMessageOutcome::None diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index a57740ec2746b..c9512f82e23bb 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -63,10 +63,12 @@ pub mod generic { use bitflags::bitflags; use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; - use sc_network_common::message::RequestId; - use sc_network_sync::message::{ - generic::{BlockRequest, BlockResponse}, - BlockAnnounce, + use sc_network_common::{ + message::RequestId, + sync::message::{ + generic::{BlockRequest, BlockResponse}, + BlockAnnounce, + }, }; use sp_runtime::ConsensusEngineId; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1cc717a50d039..ef7ef2f5a2deb 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,7 +30,7 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, bitswap::Bitswap, - config::{parse_str_addr, Params, TransportConfig}, + config::{self, parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, network_state::{ @@ -60,7 +60,7 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_sync::{Status as SyncStatus, SyncState}; +use sc_network_common::sync::{SyncMode, SyncState, SyncStatus}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -207,13 +207,23 @@ where None => (None, None), }; - let (protocol, peerset_handle, mut known_addresses) = Protocol::new( - protocol::ProtocolConfig { - roles: From::from(¶ms.role), - max_parallel_downloads: params.network_config.max_parallel_downloads, - sync_mode: params.network_config.sync_mode.clone(), + let chain_sync = (params.create_chain_sync)( + if params.role.is_light() { + SyncMode::Light + } else { + match params.network_config.sync_mode { + config::SyncMode::Full => SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => SyncMode::Warp, + } }, params.chain.clone(), + warp_sync_provider, + )?; + let (protocol, peerset_handle, mut known_addresses) = Protocol::new( + From::from(¶ms.role), + params.chain.clone(), params.protocol_id.clone(), ¶ms.network_config, iter::once(Vec::new()) @@ -222,9 +232,8 @@ where .map(|_| default_notif_handshake_message.clone()), ) .collect(), - params.block_announce_validator, params.metrics_registry.as_ref(), - warp_sync_provider, + chain_sync, )?; // List of multiaddresses that we know in the network. 
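// Aside: the sync-mode translation at the `create_chain_sync` call site above,
// restated in miniature. The two enums here mirror `config::SyncMode` (the
// user-facing setting) and the `sc_network_common` `SyncMode` (what
// `ChainSync` consumes) as they appear in this patch; a light role always
// forces the light mode regardless of the configured sync mode.
enum ConfigSyncMode {
	Full,
	Fast { skip_proofs: bool, storage_chain_mode: bool },
	Warp,
}

enum CommonSyncMode {
	Full,
	Light,
	LightState { skip_proofs: bool, storage_chain_mode: bool },
	Warp,
}

fn to_common_sync_mode(is_light: bool, mode: ConfigSyncMode) -> CommonSyncMode {
	if is_light {
		return CommonSyncMode::Light
	}
	match mode {
		ConfigSyncMode::Full => CommonSyncMode::Full,
		ConfigSyncMode::Fast { skip_proofs, storage_chain_mode } =>
			CommonSyncMode::LightState { skip_proofs, storage_chain_mode },
		ConfigSyncMode::Warp => CommonSyncMode::Warp,
	}
}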
diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 808546d67fc7c..181d58130aa6b 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -16,15 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{ - config, state_request_handler::StateRequestHandler, Event, NetworkService, NetworkWorker, -}; +use crate::{config, Event, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; use sc_network_common::config::ProtocolId; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; -use sc_network_sync::block_request_handler::BlockRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + ChainSync, +}; +use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; @@ -109,6 +111,7 @@ fn build_test_full_node( protocol_config }; + let max_parallel_downloads = config.max_parallel_downloads; let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, @@ -120,8 +123,17 @@ fn build_test_full_node( transaction_pool: Arc::new(crate::config::EmptyTransactionPool), protocol_id, import_queue, - block_announce_validator: Box::new( - sp_consensus::block_validation::DefaultBlockAnnounceValidator, + create_chain_sync: Box::new( + move |sync_mode, chain, warp_sync_provider| match ChainSync::new( + sync_mode, + chain, + Box::new(DefaultBlockAnnounceValidator), + max_parallel_downloads, + warp_sync_provider, + ) { + Ok(chain_sync) => Ok(Box::new(chain_sync)), + Err(error) => Err(Box::new(error).into()), + }, ), metrics_registry: None, block_request_protocol_config, diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index a61f780fd56ad..3e3526146400a 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -17,11 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } -either = "1.5.3" futures = "0.3.21" libp2p = "0.46.1" log = "0.4.17" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 4cd69d1fac4f5..2a847a8bf36ec 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -17,10 +17,7 @@ //! Helper for handling (i.e. answering) block requests from a remote peer via the //! `crate::request_responses::RequestResponsesBehaviour`. 
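// Aside: the `create_chain_sync` closure in the test diff above is what lets
// the network crate stay generic over the sync implementation. Reduced to a
// self-contained sketch, its shape is roughly the following (every name here
// is an illustrative stand-in, not a definition from this patch; the real
// parameter uses the `sc_network_common` trait objects):
use std::sync::Arc;

enum SyncModeLike { Full, Light }
trait ChainSyncLike {}
trait WarpSyncProviderLike {}
struct ClientLike;

type ChainSyncFactory = Box<
	dyn FnOnce(
		SyncModeLike,
		Arc<ClientLike>,
		Option<Arc<dyn WarpSyncProviderLike>>,
	) -> Result<Box<dyn ChainSyncLike>, Box<dyn std::error::Error + Send>>,
>;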
-use crate::{ - message::BlockAttributes, - schema::v1::{block_request::FromBlock, BlockResponse, Direction}, -}; +use crate::schema::v1::{block_request::FromBlock, BlockResponse, Direction}; use codec::{Decode, Encode}; use futures::{ channel::{mpsc, oneshot}, @@ -34,6 +31,7 @@ use sc_client_api::BlockBackend; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + sync::message::BlockAttributes, }; use sp_blockchain::HeaderBackend; use sp_runtime::{ diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index 26753f120a170..5fb1484675071 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -16,9 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::message; use libp2p::PeerId; use log::trace; +use sc_network_common::sync::message; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; use std::{ cmp, @@ -245,8 +245,8 @@ impl BlockCollection { #[cfg(test)] mod test { use super::{BlockCollection, BlockData, BlockRangeState}; - use crate::message; use libp2p::PeerId; + use sc_network_common::sync::message; use sp_core::H256; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index c684d8e72783e..6206f8a61bcf4 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -20,6 +20,7 @@ use crate::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; +use sc_network_common::sync::metrics::Metrics; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ @@ -56,15 +57,6 @@ pub(crate) struct ExtraRequests { request_type_name: &'static str, } -#[derive(Debug)] -pub struct Metrics { - pub pending_requests: u32, - pub active_requests: u32, - pub importing_requests: u32, - pub failed_requests: u32, - _priv: (), -} - impl ExtraRequests { pub(crate) fn new(request_type_name: &'static str) -> Self { Self { @@ -258,7 +250,6 @@ impl ExtraRequests { active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), - _priv: (), } } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 1ce69b6dc816f..aff773bd12ed6 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -30,8 +30,7 @@ pub mod block_request_handler; pub mod blocks; -pub mod message; -pub mod schema; +mod schema; pub mod state; pub mod state_request_handler; pub mod warp; @@ -39,22 +38,28 @@ pub mod warp_request_handler; use crate::{ blocks::BlockCollection, - message::{BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse}, schema::v1::{StateRequest, StateResponse}, - state::{StateDownloadProgress, StateSync}, - warp::{ - EncodedProof, WarpProofImportResult, WarpProofRequest, WarpSync, WarpSyncPhase, - WarpSyncProgress, WarpSyncProvider, - }, + state::StateSync, + warp::{WarpProofImportResult, WarpSync}, }; -use codec::Encode; -use either::Either; +use codec::{Decode, DecodeAll, Encode}; use extra_requests::ExtraRequests; use futures::{stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt}; use libp2p::PeerId; use 
log::{debug, error, info, trace, warn}; +use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_network_common::sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, + FromBlock, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, +}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ @@ -71,7 +76,6 @@ use sp_runtime::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - fmt, ops::Range, pin::Pin, sync::Arc, @@ -283,15 +287,6 @@ impl PeerSync { } } -/// The sync status of a peer we are trying to sync with -#[derive(Debug)] -pub struct PeerInfo { - /// Their best block hash. - pub best_hash: B::Hash, - /// Their best block number. - pub best_number: NumberFor, -} - struct ForkTarget { number: NumberFor, parent_hash: Option, @@ -330,108 +325,6 @@ impl PeerSyncState { } } -/// Reported sync state. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading, -} - -/// Syncing status and statistics. -#[derive(Clone)] -pub struct Status { - /// Current global sync state. - pub state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, - /// Number of blocks queued for import - pub queued_blocks: u32, - /// State sync status in progress, if any. - pub state_sync: Option, - /// Warp sync in progress, if any. - pub warp_sync: Option>, -} - -/// A peer did not behave as expected and should be reported. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange); - -impl fmt::Display for BadPeer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) - } -} - -impl std::error::Error for BadPeer {} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockData { - /// The block should be imported. - Import(BlockOrigin, Vec>), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), -} - -impl OnBlockData { - /// Returns `self` as request. - #[cfg(test)] - fn into_request(self) -> Option<(PeerId, BlockRequest)> { - if let Self::Request(peer, req) = self { - Some((peer, req)) - } else { - None - } - } -} - -/// Result of [`ChainSync::on_state_data`]. -#[derive(Debug)] -pub enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Result of [`ChainSync::poll_block_announce_validation`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PollBlockAnnounceValidation { - /// The announcement failed at validation. - /// - /// The peer reputation should be decreased. - Failure { - /// Who sent the processed block announcement? - who: PeerId, - /// Should the peer be disconnected? 
- disconnect: bool, - }, - /// The announcement does not require further handling. - Nothing { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, - /// The announcement header should be imported. - ImportHeader { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, - /// The block announcement should be skipped. - Skip, -} - /// Result of [`ChainSync::block_announce_validation`]. #[derive(Debug, Clone, PartialEq, Eq)] enum PreValidateBlockAnnounce { @@ -467,28 +360,6 @@ enum PreValidateBlockAnnounce { Skip, } -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { peer: PeerId, hash: B::Hash, number: NumberFor, justifications: Justifications }, -} - -/// Operation mode. -#[derive(Debug, PartialEq, Eq)] -pub enum SyncMode { - // Sync headers only - Light, - // Sync headers and block bodies - Full, - // Sync headers and the last finalied state - LightState { storage_chain_mode: bool, skip_proofs: bool }, - // Warp sync mode. - Warp, -} - /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. enum HasSlotForBlockAnnounceValidation { /// Yes, there is a slot for the block announce validation. @@ -499,7 +370,7 @@ enum HasSlotForBlockAnnounceValidation { MaximumPeerSlotsReached, } -impl ChainSync +impl ChainSyncT for ChainSync where B: BlockT, Client: HeaderBackend @@ -510,88 +381,14 @@ where + Sync + 'static, { - /// Create a new instance. - pub fn new( - mode: SyncMode, - client: Arc, - block_announce_validator: Box + Send>, - max_parallel_downloads: u32, - warp_sync_provider: Option>>, - ) -> Result { - let mut sync = Self { - client, - peers: HashMap::new(), - blocks: BlockCollection::new(), - best_queued_hash: Default::default(), - best_queued_number: Zero::zero(), - extra_justifications: ExtraRequests::new("justification"), - mode, - queue_blocks: Default::default(), - fork_targets: Default::default(), - allowed_requests: Default::default(), - block_announce_validator, - max_parallel_downloads, - downloaded_blocks: 0, - block_announce_validation: Default::default(), - block_announce_validation_per_peer_stats: Default::default(), - state_sync: None, - warp_sync: None, - warp_sync_provider, - import_existing: false, - gap_sync: None, - }; - sync.reset_sync_start_point()?; - Ok(sync) - } - - fn required_block_attributes(&self) -> BlockAttributes { - match self.mode { - SyncMode::Full => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | - BlockAttributes::JUSTIFICATION | - BlockAttributes::INDEXED_BODY, - } - } - - fn skip_execution(&self) -> bool { - match self.mode { - SyncMode::Full => false, - SyncMode::Light => true, - SyncMode::LightState { .. } => true, - SyncMode::Warp => true, - } - } - - /// Returns the state of the sync of the given peer. - /// - /// Returns `None` if the peer is unknown. 
- pub fn peer_info(&self, who: &PeerId) -> Option> { + fn peer_info(&self, who: &PeerId) -> Option> { self.peers .get(who) .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) } - /// Returns the best seen block. - fn best_seen(&self) -> Option> { - let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); - - if best_seens.is_empty() { - None - } else { - let middle = best_seens.len() / 2; - - // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) - } - } - /// Returns the current sync status. - pub fn status(&self) -> Status { + fn status(&self) -> SyncStatus { let best_seen = self.best_seen(); let sync_state = if let Some(n) = best_seen { // A chain is classified as downloading if the provided best block is @@ -617,7 +414,7 @@ where _ => None, }; - Status { + SyncStatus { state: sync_state, best_seen_block: best_seen, num_peers: self.peers.len() as u32, @@ -627,29 +424,22 @@ where } } - /// Number of active forks requests. This includes - /// requests that are pending or could be issued right away. - pub fn num_sync_requests(&self) -> usize { + fn num_sync_requests(&self) -> usize { self.fork_targets .values() .filter(|f| f.number <= self.best_queued_number) .count() } - /// Number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { + fn num_downloaded_blocks(&self) -> usize { self.downloaded_blocks } - /// Returns the current number of peers stored within this state machine. - pub fn num_peers(&self) -> usize { + fn num_peers(&self) -> usize { self.peers.len() } - /// Handle a new connected peer. - /// - /// Call this method whenever we connect to a new peer. - pub fn new_peer( + fn new_peer( &mut self, who: PeerId, best_hash: B::Hash, @@ -773,27 +563,22 @@ where } } - /// Signal that a new best block has been imported. - /// `ChainSync` state with that information. - pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { self.on_block_queued(best_hash, best_number); } - /// Schedule a justification request for the given block. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; self.extra_justifications .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) } - /// Clear all pending justification requests. - pub fn clear_justification_requests(&mut self) { + fn clear_justification_requests(&mut self) { self.extra_justifications.reset(); } - /// Request syncing for the given block from given set of peers. // The implementation is similar to on_block_announce with unknown parent hash. - pub fn set_sync_fork_request( + fn set_sync_fork_request( &mut self, mut peers: Vec, hash: &B::Hash, @@ -845,13 +630,12 @@ where .extend(peers); } - /// Get an iterator over all scheduled justification requests. 
- pub fn justification_requests( + fn justification_requests( &mut self, - ) -> impl Iterator)> + '_ { + ) -> Box)> + '_> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); - std::iter::from_fn(move || { + Box::new(std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(peers) { peers .get_mut(&peer) @@ -859,33 +643,32 @@ where "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", ) .state = PeerSyncState::DownloadingJustification(request.0); - let req = message::generic::BlockRequest { + let req = BlockRequest:: { id: 0, fields: BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Hash(request.0), + from: FromBlock::Hash(request.0), to: None, - direction: message::Direction::Ascending, + direction: Direction::Ascending, max: Some(1), }; Some((peer, req)) } else { None } - }) + })) } - /// Get an iterator over all block requests of all peers. - pub fn block_requests(&mut self) -> impl Iterator)> + '_ { + fn block_requests(&mut self) -> Box)> + '_> { if self.allowed_requests.is_empty() || self.state_sync.is_some() || self.mode == SyncMode::Warp { - return Either::Left(std::iter::empty()) + return Box::new(std::iter::empty()) } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: "sync", "Too many blocks in the queue."); - return Either::Left(std::iter::empty()) + return Box::new(std::iter::empty()) } let major_sync = self.status().state == SyncState::Downloading; let attrs = self.required_block_attributes(); @@ -982,11 +765,10 @@ where None } }); - Either::Right(iter) + Box::new(iter) } - /// Get a state request, if any. - pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { return None } @@ -1007,7 +789,7 @@ where let request = sync.next_request(); trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, OpaqueStateRequest(Box::new(request)))) } } } @@ -1023,7 +805,7 @@ where trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); peer.state = PeerSyncState::DownloadingState; self.allowed_requests.clear(); - return Some((*id, request)) + return Some((*id, OpaqueStateRequest(Box::new(request)))) } } } @@ -1031,8 +813,7 @@ where None } - /// Get a warp sync request, if any. - pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -1063,14 +844,7 @@ where None } - /// Handle a response from the remote to a block request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// or `None` if data comes from the block announcement. - /// - /// If this corresponds to a valid block, this outputs the block that - /// must be imported in the import queue. 
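// Aside: returning `Box<dyn Iterator<...> + '_>` above (and dropping the
// `either` crate) is what makes the trait object-safe: `impl Trait` return
// types cannot appear on a trait used as `Box<dyn ChainSync<...>>`. A
// self-contained miniature of the same change:
trait Source {
	// Object-safe: the concrete iterator type is erased behind a box.
	fn items(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}

struct Empty;
struct Counting(Vec<u32>);

impl Source for Empty {
	fn items(&self) -> Box<dyn Iterator<Item = u32> + '_> {
		// Where the old code returned `Either::Left(std::iter::empty())`,
		// a boxed empty iterator now serves every early-return arm.
		Box::new(std::iter::empty())
	}
}

impl Source for Counting {
	fn items(&self) -> Box<dyn Iterator<Item = u32> + '_> {
		Box::new(self.0.iter().copied())
	}
}

fn demo() -> u32 {
	// The point of the exercise: `Source` can now live behind `dyn`.
	let sources: Vec<Box<dyn Source>> = vec![Box::new(Empty), Box::new(Counting(vec![1, 2]))];
	sources.iter().flat_map(|s| s.items()).sum()
}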
- pub fn on_block_data( + fn on_block_data( &mut self, who: &PeerId, request: Option>, @@ -1080,10 +854,7 @@ where let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; - if request - .as_ref() - .map_or(false, |r| r.direction == message::Direction::Descending) - { + if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { trace!(target: "sync", "Reversing incoming block list"); blocks.reverse() } @@ -1297,14 +1068,20 @@ where Ok(self.validate_and_queue_blocks(new_blocks, gap)) } - /// Handle a response from the remote to a state request that we made. - /// - /// Returns next request if any. - pub fn on_state_data( + fn on_state_data( &mut self, who: &PeerId, - response: StateResponse, + response: OpaqueStateResponse, ) -> Result, BadPeer> { + let response: Box = response.0.downcast().map_err(|_error| { + error!( + target: "sync", + "Failed to downcast opaque state response, this is an implementation bug." + ); + + BadPeer(*who, rep::BAD_RESPONSE) + })?; + if let Some(peer) = self.peers.get_mut(who) { if let PeerSyncState::DownloadingState = peer.state { peer.state = PeerSyncState::Available; @@ -1319,7 +1096,7 @@ where response.entries.len(), response.proof.len(), ); - sync.import(response) + sync.import(*response) } else if let Some(sync) = &mut self.warp_sync { debug!( target: "sync", @@ -1328,7 +1105,7 @@ where response.entries.len(), response.proof.len(), ); - sync.import_state(response) + sync.import_state(*response) } else { debug!(target: "sync", "Ignored obsolete state response from {}", who); return Err(BadPeer(*who, rep::NOT_REQUESTED)) @@ -1360,14 +1137,7 @@ where } } - /// Handle a response from the remote to a warp proof request that we made. - /// - /// Returns next request. - pub fn on_warp_sync_data( - &mut self, - who: &PeerId, - response: EncodedProof, - ) -> Result<(), BadPeer> { + fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer> { if let Some(peer) = self.peers.get_mut(who) { if let PeerSyncState::DownloadingWarpProof = peer.state { peer.state = PeerSyncState::Available; @@ -1396,57 +1166,7 @@ where } } - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> OnBlockData { - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!( - target: "sync", - "Ignoring {} blocks that are already queued", - orig_len - new_blocks.len(), - ); - } - - let origin = if !gap && self.status().state != SyncState::Downloading { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - - if let Some((h, n)) = new_blocks - .last() - .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) - { - trace!( - target:"sync", - "Accepted {} blocks ({:?}) with origin {:?}", - new_blocks.len(), - h, - origin, - ); - self.on_block_queued(h, n) - } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - OnBlockData::Import(origin, new_blocks) - } - - fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { - if let Some(peer) = self.peers.get_mut(peer_id) { - peer.update_common_number(new_common); - } - } - - /// Handle a response from the remote to a justification request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// - /// Returns `Some` if this produces a justification that must be imported - /// into the import queue. 
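// Aside: `OpaqueStateResponse` and friends wrap `Box<dyn Any + Send>`, so the
// concrete protobuf type is recovered by downcasting, as `on_state_data` does
// above. The round-trip in miniature (`Opaque` and `StateResponse` here are
// stand-ins, not the real types):
use std::any::Any;

struct Opaque(Box<dyn Any + Send>);

#[derive(Debug, PartialEq)]
struct StateResponse {
	entries: u32,
}

fn round_trip() {
	let opaque = Opaque(Box::new(StateResponse { entries: 3 }));
	// `downcast` returns the original `Box<StateResponse>` on success, and
	// hands the untouched `Box<dyn Any + Send>` back on a type mismatch, which
	// is what the `map_err` branch above turns into a `BadPeer` reputation
	// change.
	let concrete: Box<StateResponse> =
		opaque.0.downcast().expect("was constructed with this exact type; qed");
	assert_eq!(*concrete, StateResponse { entries: 3 });
}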
- pub fn on_block_justification( + fn on_block_justification( &mut self, who: PeerId, response: BlockResponse, @@ -1501,18 +1221,12 @@ where Ok(OnBlockJustification::Nothing) } - /// A batch of blocks have been processed, with or without errors. - /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. - /// - /// `peer_info` is passed in case of a restart. - pub fn on_blocks_processed( + fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> impl Iterator), BadPeer>> { + ) -> Box), BadPeer>>> { trace!(target: "sync", "Imported {} of {}", imported, count); let mut output = Vec::new(); @@ -1654,54 +1368,435 @@ where } self.allowed_requests.set_all(); - output.into_iter() + Box::new(output.into_iter()) + } + + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); + self.allowed_requests.set_all(); + } + + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let SyncMode::LightState { skip_proofs, .. } = &self.mode { + if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { + // Finalized a recent block. + let mut heads: Vec<_> = + self.peers.iter().map(|(_, peer)| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(BlockId::hash(*hash)) { + log::debug!( + target: "sync", + "Starting state sync for #{} ({})", + number, + hash, + ); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, *skip_proofs)); + self.allowed_requests.set_all(); + } + } + } + } + + if let Err(err) = r { + warn!( + target: "sync", + "💔 Error cleaning up pending extra justification data requests: {}", + err, + ); + } + } + + fn push_block_announce_validation( + &mut self, + who: PeerId, + hash: B::Hash, + announce: BlockAnnounce, + is_best: bool, + ) { + let header = &announce.header; + let number = *header.number(); + debug!( + target: "sync", + "Pre-validating received block announcement {:?} with number {:?} from {}", + hash, + number, + who, + ); + + if number.is_zero() { + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored genesis block (#0) announcement from {}: {}", + who, + hash, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); + return + } + + // Check if there is a slot for this block announce validation. 
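// Aside: the slot check below enforces two independent caps, sketched here
// self-contained (the constant values and the `PeerKey` alias are illustrative
// assumptions; the real limits are crate constants and the key is a `PeerId`):
use std::collections::HashMap;

const MAX_TOTAL_VALIDATIONS: usize = 256; // illustrative value
const MAX_VALIDATIONS_PER_PEER: usize = 4; // illustrative value

type PeerKey = u64;

enum Slot {
	Yes,
	TotalMaximumSlotsReached,
	MaximumPeerSlotsReached,
}

fn has_slot(total_in_flight: usize, per_peer: &HashMap<PeerKey, usize>, who: PeerKey) -> Slot {
	if total_in_flight >= MAX_TOTAL_VALIDATIONS {
		// Global cap, protecting the node as a whole.
		Slot::TotalMaximumSlotsReached
	} else if per_peer.get(&who).copied().unwrap_or(0) >= MAX_VALIDATIONS_PER_PEER {
		// Per-peer cap, stopping a single peer from monopolising the slots.
		Slot::MaximumPeerSlotsReached
	} else {
		Slot::Yes
	}
}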
+ match self.has_slot_for_block_announce_validation(&who) { + HasSlotForBlockAnnounceValidation::Yes => {}, + HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", + number, + hash, + who, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); + return + }, + HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { + self.block_announce_validation.push(async move { + warn!( + target: "sync", + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", + number, + hash, + who, + ); + PreValidateBlockAnnounce::Skip + }.boxed()); + return + }, + } + + // Let external validator check the block announcement. + let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); + let future = self.block_announce_validator.validate(header, assoc_data); + + self.block_announce_validation.push( + async move { + match future.await { + Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { + is_new_best: is_new_best || is_best, + announce, + who, + }, + Ok(Validation::Failure { disconnect }) => { + debug!( + target: "sync", + "Block announcement validation of block {:?} from {} failed", + hash, + who, + ); + PreValidateBlockAnnounce::Failure { who, disconnect } + }, + Err(e) => { + debug!( + target: "sync", + "💔 Block announcement validation of block {:?} errored: {}", + hash, + e, + ); + PreValidateBlockAnnounce::Error { who } + }, + } + } + .boxed(), + ); + } + + fn poll_block_announce_validation( + &mut self, + cx: &mut std::task::Context, + ) -> Poll> { + match self.block_announce_validation.poll_next_unpin(cx) { + Poll::Ready(Some(res)) => { + self.peer_block_announce_validation_finished(&res); + Poll::Ready(self.finish_block_announce_validation(res)) + }, + _ => Poll::Pending, + } + } + + fn peer_disconnected(&mut self, who: &PeerId) -> Option> { + self.blocks.clear_peer_download(who); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who) + } + self.peers.remove(who); + self.extra_justifications.peer_disconnected(who); + self.allowed_requests.set_all(); + self.fork_targets.retain(|_, target| { + target.peers.remove(who); + !target.peers.is_empty() + }); + let blocks = self.ready_blocks(); + (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + } + + fn metrics(&self) -> Metrics { + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + justifications: self.extra_justifications.metrics(), + } + } + + /// Create implementation-specific block request. 
+	fn create_opaque_block_request(&self, request: &BlockRequest<B>) -> OpaqueBlockRequest {
+		OpaqueBlockRequest(Box::new(schema::v1::BlockRequest {
+			fields: request.fields.to_be_u32(),
+			from_block: match request.from {
+				FromBlock::Hash(h) => Some(schema::v1::block_request::FromBlock::Hash(h.encode())),
+				FromBlock::Number(n) =>
+					Some(schema::v1::block_request::FromBlock::Number(n.encode())),
+			},
+			to_block: request.to.map(|h| h.encode()).unwrap_or_default(),
+			direction: request.direction as i32,
+			max_blocks: request.max.unwrap_or(0),
+			support_multiple_justifications: true,
+		}))
+	}
+
+	fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result<Vec<u8>, String> {
+		let request: &schema::v1::BlockRequest = request.0.downcast_ref().ok_or_else(|| {
+			"Failed to downcast opaque block request during encoding, this is an \
+			 implementation bug."
+				.to_string()
+		})?;
+
+		Ok(request.encode_to_vec())
+	}
+
+	fn decode_block_response(&self, response: &[u8]) -> Result<OpaqueBlockResponse, String> {
+		let response = schema::v1::BlockResponse::decode(response)
+			.map_err(|error| format!("Failed to decode block response: {error}"))?;
+
+		Ok(OpaqueBlockResponse(Box::new(response)))
+	}
+
+	fn block_response_into_blocks(
+		&self,
+		request: &BlockRequest<B>,
+		response: OpaqueBlockResponse,
+	) -> Result<Vec<BlockData<B>>, String> {
+		let response: Box<schema::v1::BlockResponse> = response.0.downcast().map_err(|_error| {
+			"Failed to downcast opaque block response, this is an \
+			 implementation bug."
+				.to_string()
+		})?;
+
+		response
+			.blocks
+			.into_iter()
+			.map(|block_data| {
+				Ok(BlockData::<B> {
+					hash: Decode::decode(&mut block_data.hash.as_ref())?,
+					header: if !block_data.header.is_empty() {
+						Some(Decode::decode(&mut block_data.header.as_ref())?)
+					} else {
+						None
+					},
+					body: if request.fields.contains(BlockAttributes::BODY) {
+						Some(
+							block_data
+								.body
+								.iter()
+								.map(|body| Decode::decode(&mut body.as_ref()))
+								.collect::<Result<Vec<_>, _>>()?,
+						)
+					} else {
+						None
+					},
+					indexed_body: if request.fields.contains(BlockAttributes::INDEXED_BODY) {
+						Some(block_data.indexed_body)
+					} else {
+						None
+					},
+					receipt: if !block_data.receipt.is_empty() {
+						Some(block_data.receipt)
+					} else {
+						None
+					},
+					message_queue: if !block_data.message_queue.is_empty() {
+						Some(block_data.message_queue)
+					} else {
+						None
+					},
+					justification: if !block_data.justification.is_empty() {
+						Some(block_data.justification)
+					} else if block_data.is_empty_justification {
+						Some(Vec::new())
+					} else {
+						None
+					},
+					justifications: if !block_data.justifications.is_empty() {
+						Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?)
+					} else {
+						None
+					},
+				})
+			})
+			.collect::<Result<_, _>>()
+			.map_err(|error: codec::Error| error.to_string())
+	}
+
+	fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result<Vec<u8>, String> {
+		let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| {
+			"Failed to downcast opaque state request during encoding, this is an \
+			 implementation bug."
+				.to_string()
+		})?;
+
+		Ok(request.encode_to_vec())
+	}
+
+	fn decode_state_response(&self, response: &[u8]) -> Result<OpaqueStateResponse, String> {
+		let response = StateResponse::decode(response)
+			.map_err(|error| format!("Failed to decode state response: {error}"))?;
+
+		Ok(OpaqueStateResponse(Box::new(response)))
+	}
+}
+
+impl<B, Client> ChainSync<B, Client>
+where
+	Self: ChainSyncT<B>,
+	B: BlockT,
+	Client: HeaderBackend<B>
+		+ BlockBackend<B>
+		+ HeaderMetadata<B, Error = sp_blockchain::Error>
+		+ ProofProvider<B>
+		+ Send
+		+ Sync
+		+ 'static,
+{
+	/// Create a new instance.
+ pub fn new( + mode: SyncMode, + client: Arc, + block_announce_validator: Box + Send>, + max_parallel_downloads: u32, + warp_sync_provider: Option>>, + ) -> Result { + let mut sync = Self { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), + extra_justifications: ExtraRequests::new("justification"), + mode, + queue_blocks: Default::default(), + fork_targets: Default::default(), + allowed_requests: Default::default(), + block_announce_validator, + max_parallel_downloads, + downloaded_blocks: 0, + block_announce_validation: Default::default(), + block_announce_validation_per_peer_stats: Default::default(), + state_sync: None, + warp_sync: None, + warp_sync_provider, + import_existing: false, + gap_sync: None, + }; + sync.reset_sync_start_point()?; + Ok(sync) } - /// Call this when a justification has been processed by the import queue, - /// with or without errors. - pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications - .try_finalize_root((hash, number), finalization_result, true); - self.allowed_requests.set_all(); + /// Returns the best seen block. + fn best_seen(&self) -> Option> { + let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); + + if best_seens.is_empty() { + None + } else { + let middle = best_seens.len() / 2; + + // Not the "perfect median" when we have an even number of peers. + Some(*best_seens.select_nth_unstable(middle).1) + } } - /// Notify about finalization of the given block. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, + } + } - if let SyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { - // Finalized a recent block. - let mut heads: Vec<_> = - self.peers.iter().map(|(_, peer)| peer.best_number).collect(); - heads.sort(); - let median = heads[heads.len() / 2]; - if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(BlockId::hash(*hash)) { - log::debug!( - target: "sync", - "Starting state sync for #{} ({})", - number, - hash, - ); - self.state_sync = - Some(StateSync::new(self.client.clone(), header, *skip_proofs)); - self.allowed_requests.set_all(); - } - } - } + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::Light => true, + SyncMode::LightState { .. 
} => true, + SyncMode::Warp => true, } + } - if let Err(err) = r { - warn!( + fn validate_and_queue_blocks( + &mut self, + mut new_blocks: Vec>, + gap: bool, + ) -> OnBlockData { + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!( target: "sync", - "💔 Error cleaning up pending extra justification data requests: {}", - err, + "Ignoring {} blocks that are already queued", + orig_len - new_blocks.len(), + ); + } + + let origin = if !gap && self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!( + target:"sync", + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, ); + self.on_block_queued(h, n) + } + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + OnBlockData::Import(origin, new_blocks) + } + + fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.update_common_number(new_common); } } @@ -1779,135 +1874,6 @@ where } } - /// Push a block announce validation. - /// - /// It is required that [`ChainSync::poll_block_announce_validation`] is called - /// to check for finished block announce validations. - pub fn push_block_announce_validation( - &mut self, - who: PeerId, - hash: B::Hash, - announce: BlockAnnounce, - is_best: bool, - ) { - let header = &announce.header; - let number = *header.number(); - debug!( - target: "sync", - "Pre-validating received block announcement {:?} with number {:?} from {}", - hash, - number, - who, - ); - - if number.is_zero() { - self.block_announce_validation.push( - async move { - warn!( - target: "sync", - "💔 Ignored genesis block (#0) announcement from {}: {}", - who, - hash, - ); - PreValidateBlockAnnounce::Skip - } - .boxed(), - ); - return - } - - // Check if there is a slot for this block announce validation. - match self.has_slot_for_block_announce_validation(&who) { - HasSlotForBlockAnnounceValidation::Yes => {}, - HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { - self.block_announce_validation.push( - async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Skip - } - .boxed(), - ); - return - }, - HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Skip - }.boxed()); - return - }, - } - - // Let external validator check the block announcement. 
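// Aside: the `best_seen` helper in the inherent `impl` above uses
// `select_nth_unstable`, which finds the element at a given sorted position in
// O(n) without fully sorting. With an even peer count it returns the upper of
// the two middle values, hence the "not the perfect median" remark. A worked
// miniature:
fn median_best(mut best_seens: Vec<u64>) -> Option<u64> {
	if best_seens.is_empty() {
		None
	} else {
		let middle = best_seens.len() / 2;
		Some(*best_seens.select_nth_unstable(middle).1)
	}
}

fn median_demo() {
	// Four peers: index 2 of the sorted values [3, 5, 7, 9] is 7, not the
	// average of 5 and 7.
	assert_eq!(median_best(vec![9, 3, 7, 5]), Some(7));
	assert_eq!(median_best(vec![1, 2, 3]), Some(2));
	assert_eq!(median_best(vec![]), None);
}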
- let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); - let future = self.block_announce_validator.validate(header, assoc_data); - - self.block_announce_validation.push( - async move { - match future.await { - Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { - is_new_best: is_new_best || is_best, - announce, - who, - }, - Ok(Validation::Failure { disconnect }) => { - debug!( - target: "sync", - "Block announcement validation of block {:?} from {} failed", - hash, - who, - ); - PreValidateBlockAnnounce::Failure { who, disconnect } - }, - Err(e) => { - debug!( - target: "sync", - "💔 Block announcement validation of block {:?} errored: {}", - hash, - e, - ); - PreValidateBlockAnnounce::Error { who } - }, - } - } - .boxed(), - ); - } - - /// Poll block announce validation. - /// - /// Block announce validations can be pushed by using - /// [`ChainSync::push_block_announce_validation`]. - /// - /// This should be polled until it returns [`Poll::Pending`]. - /// - /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to - /// import passed header (call `on_block_data`). The network request isn't sent in this case. - pub fn poll_block_announce_validation( - &mut self, - cx: &mut std::task::Context, - ) -> Poll> { - match self.block_announce_validation.poll_next_unpin(cx) { - Poll::Ready(Some(res)) => { - self.peer_block_announce_validation_finished(&res); - Poll::Ready(self.finish_block_announce_validation(res)) - }, - _ => Poll::Pending, - } - } - /// Should be called when a block announce validation is finished, to update the slots /// of the peer that send the block announce. fn peer_block_announce_validation_finished( @@ -2065,29 +2031,6 @@ where PollBlockAnnounceValidation::Nothing { is_best, who, announce } } - /// Call when a peer has disconnected. - /// Canceled obsolete block request may result in some blocks being ready for - /// import, so this functions checks for such blocks and returns them. - pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> { - self.blocks.clear_peer_download(who); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who) - } - self.peers.remove(who); - self.extra_justifications.peer_disconnected(who); - self.allowed_requests.set_all(); - self.fork_targets.retain(|_, target| { - target.peers.remove(who); - !target.peers.is_empty() - }); - let blocks = self.ready_blocks(); - if !blocks.is_empty() { - Some(self.validate_and_queue_blocks(blocks, false)) - } else { - None - } - } - /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. @@ -2190,16 +2133,6 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } - /// Return some key metrics. - pub fn metrics(&self) -> Metrics { - Metrics { - queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), - fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - justifications: self.extra_justifications.metrics(), - _priv: (), - } - } - /// Get the set of downloaded blocks that are ready to be queued for import. 
fn ready_blocks(&mut self) -> Vec> { self.blocks @@ -2237,23 +2170,15 @@ fn legacy_justification_mapping( justification.map(|just| (*b"FRNK", just).into()) } -#[derive(Debug)] -pub struct Metrics { - pub queued_blocks: u32, - pub fork_targets: u32, - pub justifications: extra_requests::Metrics, - _priv: (), -} - /// Request the ancestry for a block. Sends a request for header and justification for the given /// block number. Used during ancestry search. fn ancestry_request(block: NumberFor) -> BlockRequest { - message::generic::BlockRequest { + BlockRequest:: { id: 0, fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Number(block), + from: FromBlock::Number(block), to: None, - direction: message::Direction::Ascending, + direction: Direction::Ascending, max: Some(1), } } @@ -2331,7 +2256,7 @@ fn peer_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: message::BlockAttributes, + attrs: BlockAttributes, max_parallel_downloads: u32, finalized: NumberFor, best_num: NumberFor, @@ -2359,17 +2284,17 @@ fn peer_block_request( let last = range.end.saturating_sub(One::one()); let from = if peer.best_number == last { - message::FromBlock::Hash(peer.best_hash) + FromBlock::Hash(peer.best_hash) } else { - message::FromBlock::Number(last) + FromBlock::Number(last) }; - let request = message::generic::BlockRequest { + let request = BlockRequest:: { id: 0, fields: attrs, from, to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2381,7 +2306,7 @@ fn peer_gap_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: message::BlockAttributes, + attrs: BlockAttributes, target: NumberFor, common_number: NumberFor, ) -> Option<(Range>, BlockRequest)> { @@ -2396,14 +2321,14 @@ fn peer_gap_block_request( // The end is not part of the range. let last = range.end.saturating_sub(One::one()); - let from = message::FromBlock::Number(last); + let from = FromBlock::Number(last); - let request = message::generic::BlockRequest { + let request = BlockRequest:: { id: 0, fields: attrs, from, to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; Some((range, request)) @@ -2415,7 +2340,7 @@ fn fork_sync_request( targets: &mut HashMap>, best_num: NumberFor, finalized: NumberFor, - attributes: message::BlockAttributes, + attributes: BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, ) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { @@ -2448,12 +2373,12 @@ fn fork_sync_request( trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); return Some(( *hash, - message::generic::BlockRequest { + BlockRequest:: { id: 0, fields: attributes, - from: message::FromBlock::Hash(*hash), + from: FromBlock::Hash(*hash), to: None, - direction: message::Direction::Descending, + direction: Direction::Descending, max: Some(count), }, )) @@ -2488,7 +2413,7 @@ where /// /// It is expected that `blocks` are in ascending order. 
fn validate_blocks( - blocks: &Vec>, + blocks: &Vec>, who: &PeerId, request: Option>, ) -> Result>, BadPeer> { @@ -2505,16 +2430,13 @@ fn validate_blocks( return Err(BadPeer(*who, rep::NOT_REQUESTED)) } - let block_header = if request.direction == message::Direction::Descending { - blocks.last() - } else { - blocks.first() - } - .and_then(|b| b.header.as_ref()); + let block_header = + if request.direction == Direction::Descending { blocks.last() } else { blocks.first() } + .and_then(|b| b.header.as_ref()); let expected_block = block_header.as_ref().map_or(false, |h| match request.from { - message::FromBlock::Hash(hash) => h.hash() == hash, - message::FromBlock::Number(n) => h.number() == &n, + FromBlock::Hash(hash) => h.hash() == hash, + FromBlock::Number(n) => h.number() == &n, }); if !expected_block { @@ -2528,7 +2450,7 @@ fn validate_blocks( return Err(BadPeer(*who, rep::NOT_REQUESTED)) } - if request.fields.contains(message::BlockAttributes::HEADER) && + if request.fields.contains(BlockAttributes::HEADER) && blocks.iter().any(|b| b.header.is_none()) { trace!( @@ -2540,8 +2462,7 @@ fn validate_blocks( return Err(BadPeer(*who, rep::BAD_RESPONSE)) } - if request.fields.contains(message::BlockAttributes::BODY) && - blocks.iter().any(|b| b.body.is_none()) + if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) { trace!( target: "sync", @@ -2592,13 +2513,10 @@ fn validate_blocks( #[cfg(test)] mod test { - use super::{ - message::{BlockState, FromBlock}, - *, - }; - use crate::message::BlockData; + use super::*; use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; + use sc_network_common::sync::message::{BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ @@ -2709,7 +2627,7 @@ mod test { assert!(sync.justification_requests().any(|(p, r)| { p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && - r.from == message::FromBlock::Hash(b1_hash) && + r.from == FromBlock::Hash(b1_hash) && r.to == None })); @@ -3125,10 +3043,11 @@ mod test { let response = create_block_response(vec![block.clone()]); let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = match on_block_data.into_request() { - Some(req) => req.1, + request = if let OnBlockData::Request(_peer, request) = on_block_data { + request + } else { // We found the ancenstor - None => break, + break }; log::trace!(target: "sync", "Request: {:?}", request); @@ -3255,10 +3174,11 @@ mod test { let response = create_block_response(vec![block.clone()]); let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = match on_block_data.into_request() { - Some(req) => req.1, + request = if let OnBlockData::Request(_peer, request) = on_block_data { + request + } else { // We found the ancenstor - None => break, + break }; log::trace!(target: "sync", "Request: {:?}", request); diff --git a/client/network/sync/src/schema.rs b/client/network/sync/src/schema.rs index aa3eb84621d8f..b31005360d023 100644 --- a/client/network/sync/src/schema.rs +++ b/client/network/sync/src/schema.rs @@ -18,6 +18,6 @@ //! Include sources generated from protobuf definitions. 
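The `include!(concat!(env!("OUT_DIR"), ...))` below pulls in Rust sources emitted by the crate's build script. A minimal `build.rs` of that shape could look like this, assuming `prost-build` in `[build-dependencies]`; the proto path is an illustrative guess, not necessarily this crate's actual layout:

```rust
// build.rs — a sketch only: assumes `prost-build` is available and a proto
// file exists at this (hypothetical) path.
fn main() {
    prost_build::compile_protos(&["src/schema/api.v1.proto"], &["src/schema"])
        .expect("failed to compile protobuf definitions");
}
```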
-pub mod v1 { +pub(crate) mod v1 { include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); } diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 4041c28af0eba..e70d3b6b33a28 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -23,6 +23,7 @@ use codec::{Decode, Encode}; use log::debug; use sc_client_api::{CompactProof, ProofProvider}; use sc_consensus::ImportedState; +use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; @@ -42,15 +43,6 @@ pub struct StateSync { skip_proof: bool, } -/// Reported state download progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct StateDownloadProgress { - /// Estimated download percentage. - pub percentage: u32, - /// Total state size in bytes downloaded so far. - pub size: u64, -} - /// Import state chunk result. pub enum ImportResult { /// State is complete and ready for import. diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index d3d9d7d244153..f3fad6c1b7fdb 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -18,60 +18,25 @@ //! Warp sync support. -pub use crate::warp_request_handler::{ - EncodedProof, Request as WarpProofRequest, VerificationResult, WarpSyncProvider, -}; use crate::{ schema::v1::{StateRequest, StateResponse}, state::{ImportResult, StateSync}, }; use sc_client_api::ProofProvider; +use sc_network_common::sync::warp::{ + EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, + WarpSyncProvider, +}; use sp_blockchain::HeaderBackend; use sp_finality_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use std::{fmt, sync::Arc}; +use std::sync::Arc; enum Phase { WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, State(StateSync), } -/// Reported warp sync phase. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum WarpSyncPhase { - /// Waiting for peers to connect. - AwaitingPeers, - /// Downloading and verifying grandpa warp proofs. - DownloadingWarpProofs, - /// Downloading state data. - DownloadingState, - /// Importing state. - ImportingState, - /// Downloading block history. - DownloadingBlocks(NumberFor), -} - -impl fmt::Display for WarpSyncPhase { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::AwaitingPeers => write!(f, "Waiting for peers"), - Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), - Self::DownloadingState => write!(f, "Downloading state"), - Self::ImportingState => write!(f, "Importing state"), - Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), - } - } -} - -/// Reported warp sync progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct WarpSyncProgress { - /// Estimated download percentage. - pub phase: WarpSyncPhase, - /// Total bytes downloaded so far. - pub total_bytes: u64, -} - /// Import warp proof result. pub enum WarpProofImportResult { /// Import was successful. diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index 4f66e0a6daf17..53ec216a1e668 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. +// Copyright 2022 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
// Substrate is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ //! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. -use codec::{Decode, Encode}; +use codec::Decode; use futures::{ channel::{mpsc, oneshot}, stream::StreamExt, @@ -27,52 +27,13 @@ use sc_network_common::{ request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + sync::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}, }; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; -pub use sp_finality_grandpa::{AuthorityList, SetId}; - -/// Scale-encoded warp sync proof response. -pub struct EncodedProof(pub Vec); - -/// Warp sync request -#[derive(Encode, Decode, Debug)] -pub struct Request { - /// Start collecting proofs from this block. - pub begin: B::Hash, -} - const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; -/// Proof verification result. -pub enum VerificationResult { - /// Proof is valid, but the target was not reached. - Partial(SetId, AuthorityList, Block::Hash), - /// Target finality is proved. - Complete(SetId, AuthorityList, Block::Header), -} - -/// Warp sync backend. Handles retrieveing and verifying warp sync proofs. -pub trait WarpSyncProvider: Send + Sync { - /// Generate proof starting at given block hash. The proof is accumulated until maximum proof - /// size is reached. - fn generate( - &self, - start: B::Hash, - ) -> Result>; - /// Verify warp proof against current set of authorities. - fn verify( - &self, - proof: &EncodedProof, - set_id: SetId, - authorities: AuthorityList, - ) -> Result, Box>; - /// Get current list of authorities. This is supposed to be genesis authorities when starting - /// sync. - fn current_authorities(&self) -> AuthorityList; -} - /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing /// incoming requests. 
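The handler below starts by SCALE-decoding the raw payload into a `WarpProofRequest`. A self-contained sketch of that step, with the block hash type pinned to `[u8; 32]` so it runs standalone (requires `parity-scale-codec` with the `derive` feature, imported as `codec` as in this crate):

```rust
use codec::{Decode, Encode};

// Stand-in for `WarpProofRequest<B>` with the hash type fixed to [u8; 32].
#[derive(Encode, Decode, Debug, PartialEq)]
struct WarpProofRequest {
    begin: [u8; 32],
}

// Same call shape as the `decode(&mut &payload[..])` in `handle_request` below.
fn decode_request(payload: &[u8]) -> Result<WarpProofRequest, codec::Error> {
    WarpProofRequest::decode(&mut &payload[..])
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn round_trips() {
        let req = WarpProofRequest { begin: [7u8; 32] };
        assert_eq!(decode_request(&req.encode()).unwrap(), req);
    }
}
```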
pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { @@ -115,7 +76,7 @@ impl RequestHandler { payload: Vec, pending_response: oneshot::Sender, ) -> Result<(), HandleRequestError> { - let request = Request::::decode(&mut &payload[..])?; + let request = WarpProofRequest::::decode(&mut &payload[..])?; let EncodedProof(proof) = self .backend diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index fdb9befc41cb1..1aa6ebd8bf357 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -26,6 +26,8 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network = { version = "0.10.0-dev", path = "../" } sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-network-light = { version = "0.10.0-dev", path = "../light" } +sc-network-sync = { version = "0.10.0-dev", path = "../sync" } sc-service = { version = "0.10.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 9e752e81a3bbc..d7c83810d521d 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -48,16 +48,21 @@ use sc_consensus::{ }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - block_request_handler::BlockRequestHandler, config::{ - MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, - ProtocolConfig, Role, SyncMode, TransportConfig, + MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, + SyncMode, TransportConfig, }, - light_client_requests::handler::LightClientRequestHandler, - state_request_handler::StateRequestHandler, - warp_request_handler, Multiaddr, NetworkService, NetworkWorker, + Multiaddr, NetworkService, NetworkWorker, }; pub use sc_network_common::config::ProtocolId; +use sc_network_common::sync::warp::{ + AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + warp_request_handler, ChainSync, +}; use sc_service::client::Client; use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, @@ -638,27 +643,26 @@ impl VerifierAdapter { struct TestWarpSyncProvider(Arc>); -impl warp_request_handler::WarpSyncProvider for TestWarpSyncProvider { +impl WarpSyncProvider for TestWarpSyncProvider { fn generate( &self, _start: B::Hash, - ) -> Result> { + ) -> Result> { let info = self.0.info(); let best_header = self.0.header(BlockId::hash(info.best_hash)).unwrap().unwrap(); - Ok(warp_request_handler::EncodedProof(best_header.encode())) + Ok(EncodedProof(best_header.encode())) } fn verify( &self, - proof: &warp_request_handler::EncodedProof, - _set_id: warp_request_handler::SetId, - _authorities: warp_request_handler::AuthorityList, - ) -> Result, Box> - { - let warp_request_handler::EncodedProof(encoded) = proof; + proof: &EncodedProof, + _set_id: SetId, + _authorities: AuthorityList, + ) -> Result, Box> { + let EncodedProof(encoded) = proof; let header = B::Header::decode(&mut encoded.as_slice()).unwrap(); - 
Ok(warp_request_handler::VerificationResult::Complete(0, Default::default(), header)) + Ok(VerificationResult::Complete(0, Default::default(), header)) } - fn current_authorities(&self) -> warp_request_handler::AuthorityList { + fn current_authorities(&self) -> AuthorityList { Default::default() } } @@ -688,7 +692,7 @@ pub struct FullPeerConfig { pub storage_chain: bool, } -pub trait TestNetFactory: Sized +pub trait TestNetFactory: Default + Sized where >::Transaction: Send, { @@ -696,14 +700,8 @@ where type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; - /// These two need to be implemented! - fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &Self::PeerData, - ) -> Self::Verifier; + /// This one needs to be implemented! + fn make_verifier(&self, client: PeersClient, peer_data: &Self::PeerData) -> Self::Verifier; /// Get reference to peer. fn peer(&mut self, i: usize) -> &mut Peer; @@ -723,15 +721,10 @@ where Self::PeerData, ); - fn default_config() -> ProtocolConfig { - ProtocolConfig::default() - } - /// Create new test network with this many peers. fn new(n: usize) -> Self { trace!(target: "test_network", "Creating test network"); - let config = Self::default_config(); - let mut net = Self::from_config(&config); + let mut net = Self::default(); for i in 0..n { trace!(target: "test_network", "Adding peer {}", i); @@ -767,11 +760,8 @@ where let (block_import, justification_import, data) = self .make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); - let verifier = self.make_verifier( - PeersClient { client: client.clone(), backend: backend.clone() }, - &Default::default(), - &data, - ); + let verifier = self + .make_verifier(PeersClient { client: client.clone(), backend: backend.clone() }, &data); let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( @@ -845,6 +835,10 @@ where protocol_config }; + let max_parallel_downloads = network_config.max_parallel_downloads; + let block_announce_validator = config + .block_announce_validator + .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -856,9 +850,18 @@ where transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, import_queue, - block_announce_validator: config - .block_announce_validator - .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), + create_chain_sync: Box::new(move |sync_mode, chain, warp_sync_provider| { + match ChainSync::new( + sync_mode, + chain, + block_announce_validator, + max_parallel_downloads, + warp_sync_provider, + ) { + Ok(chain_sync) => Ok(Box::new(chain_sync)), + Err(error) => Err(Box::new(error).into()), + } + }), metrics_registry: None, block_request_protocol_config, state_request_protocol_config, @@ -1012,6 +1015,7 @@ where } } +#[derive(Default)] pub struct TestNet { peers: Vec>, } @@ -1021,17 +1025,7 @@ impl TestNetFactory for TestNet { type PeerData = (); type BlockImport = PeersClient; - /// Create new test network with peers and given config. 
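A minimal sketch of what this trait refactor asks of implementors: the old `from_config` constructor gives way to a plain `Default` bound plus `make_verifier`, so `new` can become a provided method. This models only those two pieces, not the full trait:

```rust
// Not the real trait: only the constructor-related surface is modelled here.
trait TestNetFactoryLike: Default {
    type Verifier;

    // The one method implementors still have to provide.
    fn make_verifier(&self) -> Self::Verifier;

    // `Self::default()` replaces the old `Self::from_config(&config)` dance.
    fn new(n: usize) -> Self {
        let net = Self::default();
        for _peer in 0..n {
            // peer setup elided
        }
        net
    }
}

#[derive(Default)]
struct DummyNet;

impl TestNetFactoryLike for DummyNet {
    type Verifier = ();
    fn make_verifier(&self) -> Self::Verifier {}
}
```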
- fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { peers: Vec::new() } - } - - fn make_verifier( - &self, - _client: PeersClient, - _config: &ProtocolConfig, - _peer_data: &(), - ) -> Self::Verifier { + fn make_verifier(&self, _client: PeersClient, _peer_data: &()) -> Self::Verifier { PassThroughVerifier::new(false) } @@ -1081,6 +1075,7 @@ impl JustificationImport for ForceFinalized { } } +#[derive(Default)] pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { @@ -1088,17 +1083,8 @@ impl TestNetFactory for JustificationTestNet { type PeerData = (); type BlockImport = PeersClient; - fn from_config(config: &ProtocolConfig) -> Self { - JustificationTestNet(TestNet::from_config(config)) - } - - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &(), - ) -> Self::Verifier { - self.0.make_verifier(client, config, peer_data) + fn make_verifier(&self, client: PeersClient, peer_data: &()) -> Self::Verifier { + self.0.make_verifier(client, peer_data) } fn peer(&mut self, i: usize) -> &mut Peer { diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index ff11dd1344d01..31b0c860cf190 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -50,8 +50,10 @@ sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/comm sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "6.0.0", path = "../../primitives/storage" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-network-light = { version = "0.10.0-dev", path = "../network/light" } +sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5319bf24d5e72..fe0ae5db53e70 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -38,13 +38,17 @@ use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; use sc_network::{ - block_request_handler::{self, BlockRequestHandler}, config::{Role, SyncMode}, - light_client_requests::{self, handler::LightClientRequestHandler}, - state_request_handler::{self, StateRequestHandler}, - warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, NetworkService, }; +use sc_network_common::sync::warp::WarpSyncProvider; +use sc_network_light::light_client_requests::{self, handler::LightClientRequestHandler}; +use sc_network_sync::{ + block_request_handler::{self, BlockRequestHandler}, + state_request_handler::{self, StateRequestHandler}, + warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler}, + ChainSync, +}; use sc_rpc::{ author::AuthorApiServer, chain::ChainApiServer, @@ -801,6 +805,7 @@ where } }; + let max_parallel_downloads = config.network.max_parallel_downloads; let network_params = sc_network::config::Params { role: config.role.clone(), executor: { @@ -818,9 +823,20 @@ where network_config: config.network.clone(), chain: client.clone(), transaction_pool: transaction_pool_adapter as _, 
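			// The fields below are where the new wiring lands: the service now
			// passes a boxed `create_chain_sync` factory instead of a
			// `block_announce_validator`, keeping `sc-network` decoupled from
			// the `ChainSync` implementation in `sc-network-sync`.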
- import_queue: Box::new(import_queue), protocol_id, - block_announce_validator, + import_queue: Box::new(import_queue), + create_chain_sync: Box::new( + move |sync_mode, chain, warp_sync_provider| match ChainSync::new( + sync_mode, + chain, + block_announce_validator, + max_parallel_downloads, + warp_sync_provider, + ) { + Ok(chain_sync) => Ok(Box::new(chain_sync)), + Err(error) => Err(Box::new(error).into()), + }, + ), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, From 42044053b5895991d06ddc776e45da61c6462c78 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 13 Jul 2022 11:09:28 +0100 Subject: [PATCH 032/135] Fix off by one error in proportional slashing (#11782) * Fix proportional slashing logic * Update frame/nomination-pools/test-staking/src/lib.rs Co-authored-by: David * Update frame/staking/src/lib.rs Co-authored-by: David * Update frame/staking/src/lib.rs Co-authored-by: David * Update frame/staking/src/lib.rs Co-authored-by: David * fmt * Update frame/nomination-pools/test-staking/src/lib.rs * clean * fix * last fixes * doc Co-authored-by: David --- .../nomination-pools/test-staking/src/lib.rs | 256 +++++++++++++++++- .../nomination-pools/test-staking/src/mock.rs | 7 +- frame/staking/src/lib.rs | 95 +++++-- frame/staking/src/tests.rs | 30 +- 4 files changed, 342 insertions(+), 46 deletions(-) diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 2e40e8c6d917d..be7752ce113b4 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -22,7 +22,8 @@ mod mock; use frame_support::{assert_noop, assert_ok, bounded_btree_map, traits::Currency}; use mock::*; use pallet_nomination_pools::{ - Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, PoolState, + BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, + PoolState, }; use pallet_staking::{CurrentEra, Event as StakingEvent, Payee, RewardDestination}; @@ -273,7 +274,7 @@ fn pool_slash_e2e() { 30, &mut Default::default(), &mut Default::default(), - 1, // slash era 1, affects chunks at era 5 onwards. + 2, // slash era 2, affects chunks at era 5 onwards. ); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); @@ -371,3 +372,254 @@ fn pool_slash_e2e() { ); }); } + +#[test] +fn pool_slash_proportional() { + // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that + // happened in era 100 should only affect the latter two. + new_test_ext().execute_with(|| { + ExistentialDeposit::set(1); + BondingDuration::set(28); + assert_eq!(Balances::minimum_balance(), 1); + assert_eq!(Staking::current_era(), None); + + // create the pool, we know this has id 1. 
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(LastPoolId::::get(), 1);
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have three members join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_ok!(Pools::join(Origin::signed(21), bond, 1));
+ assert_ok!(Pools::join(Origin::signed(22), bond, 1));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ StakingEvent::Bonded(POOL1_BONDED, bond),
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true },
+ PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: bond, joined: true },
+ PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: bond, joined: true },
+ ]
+ );
+
+ // now let's progress a lot.
+ CurrentEra::::set(Some(99));
+
+ // and unbond
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }]
+ );
+
+ CurrentEra::::set(Some(100));
+ assert_ok!(Pools::unbond(Origin::signed(21), 21, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: bond, points: bond }]
+ );
+
+ CurrentEra::::set(Some(101));
+ assert_ok!(Pools::unbond(Origin::signed(22), 22, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond),]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: bond, points: bond }]
+ );
+
+ // Apply a slash that happened in era 100. This is typically applied with a delay.
+ // Of the total 100, 50 is slashed.
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 50,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // This last pool got slashed only the leftover dust. Otherwise in principle, this
+ // chunk/pool should not have been affected.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 19 },
+ // This pool got slashed 12.5, which rounded down to 12.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 8 },
+ // This pool got slashed 12.5, which rounded down to 12.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 },
+ // Bonded pool got slashed for 25, remaining 15 in it.
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+ ]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_only_bonded_pool() {
+ // A typical example where a pool member unbonds in era 99, and they can get away with a slash
+ // that happened in era 100, as long as the pool has enough active bond to cover the slash.
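The numbers the test above asserts can be reproduced with plain integer math: everything unlocking at or after `slash_era + bonding_duration` is affected together with the active bond, and each affected part loses `slash / affected` of its value, rounded down. The helper below is a stand-in for `Perquintill::from_rational`, with a hypothetical name:

```rust
// Affected balance: the active bond plus every chunk unlocking at or after
// `slash_era + bonding_duration`; each part loses `slash / affected`, floored.
fn proportional_parts(active: u128, affected_chunks: &[u128], slash: u128) -> (u128, Vec<u128>) {
    let affected: u128 = active + affected_chunks.iter().sum::<u128>();
    let part = |v: u128| v * slash / affected; // floor division, like the ratio
    (part(active), affected_chunks.iter().map(|&c| part(c)).collect())
}

#[test]
fn matches_the_events_above() {
    // active = 40, chunks unlocking at eras 128/129 = [20, 20], slash = 50.
    let (from_active, from_chunks) = proportional_parts(40, &[20, 20], 50);
    assert_eq!(from_active, 25); // bonded pool slashed for 25, 15 remain
    assert_eq!(from_chunks, vec![12, 12]); // 12.5 rounded down; 8 remain each
}
```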
If
+ // everything else in the slashing/staking system works, this should always be the case.
+ // Nonetheless, `ledger.slash` has been written such that it will slash greedily from any chunk
+ // if it runs out of chunks that it thinks should be affected by the slash.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have one more member join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }]
+ );
+
+ // slash for 30. This will be deducted only from the bonded pool.
+ CurrentEra::::set(Some(100));
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 30,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_bonded_pool_and_chunks() {
+ // An uncommon example where even though some funds are unlocked such that they should not be
+ // affected by a slash, we still slash out of them. This should not happen at all. If a
+ // nomination has unbonded, from the next era onwards, their exposure will drop, so if a slash
+ // happens in that era, then their share of that slash should naturally be less, such that
+ // their active ledger stake alone is enough to compensate for it.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10));
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have one more member join
+ let bond = 20;
+ assert_ok!(Pools::join(Origin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
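The greedy fallback these two tests pin down can be modelled with plain integers. This is a simplified model, not the pallet code: with no chunks at or after `slash_era + bonding_duration`, the slash comes out of the active bond first, spilling into older chunks (newest first) only if the active bond is exhausted:

```rust
fn greedy_slash(active: &mut u128, older_chunks: &mut [u128], mut slash: u128) {
    // The active bond absorbs as much of the slash as it can.
    let from_active = slash.min(*active);
    *active -= from_active;
    slash -= from_active;
    // Mirrors the `(0..self.unlocking.len()).rev()` priority: newest chunk first.
    for chunk in older_chunks.iter_mut().rev() {
        if slash == 0 {
            break
        }
        let take = slash.min(*chunk);
        *chunk -= take;
        slash -= take;
    }
}

#[test]
fn only_bonded_pool_case() {
    // A slash of 30 fits inside the 40 active bond; the era-127 chunk is untouched.
    let (mut active, mut chunks) = (40u128, [20u128]);
    greedy_slash(&mut active, &mut chunks, 30);
    assert_eq!((active, chunks), (10, [20]));
}
```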
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded(POOL1_BONDED, bond)]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }]
+ );
+
+ // slash 50. This will be deducted only from the bonded pool and one of the unbonding pools.
+ CurrentEra::::set(Some(100));
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 50,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]);
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // out of 20, 10 was taken.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 10 },
+ // out of 40, all was taken.
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 0 }
+ ]
+ );
+ });
+}
diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs
index 7b720c009b29b..d13d8aa341c12 100644
--- a/frame/nomination-pools/test-staking/src/mock.rs
+++ b/frame/nomination-pools/test-staking/src/mock.rs
@@ -24,6 +24,8 @@ type AccountIndex = u32;
 type BlockNumber = u64;
 type Balance = u128;
+pub(crate) type T = Runtime;
+
 pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128;
 pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128;
@@ -194,13 +196,14 @@ frame_support::construct_runtime!(
 );
 pub fn new_test_ext() -> sp_io::TestExternalities {
+ sp_tracing::try_init_simple();
 let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap();
 let _ = pallet_nomination_pools::GenesisConfig:: {
 min_join_bond: 2,
 min_create_bond: 2,
 max_pools: Some(3),
- max_members_per_pool: Some(3),
- max_members: Some(3 * 3),
+ max_members_per_pool: Some(5),
+ max_members: Some(3 * 5),
 }
 .assimilate_storage(&mut storage)
 .unwrap();
diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs
index 360d5b5efb58f..50c033bdc7e28 100644
--- a/frame/staking/src/lib.rs
+++ b/frame/staking/src/lib.rs
@@ -529,13 +529,26 @@ impl StakingLedger {
 (self, unlocking_balance)
 }
- /// Slash the staker for a given amount of balance. This can grow the value of the slash in the
- /// case that either the active bonded or some unlocking chunks become dust after slashing.
- /// Returns the amount of funds actually slashed.
+ /// Slash the staker for a given amount of balance.
 ///
- /// `slash_era` is the era in which the slash (which is being enacted now) actually happened.
+ /// This implements a proportional slashing system, whereby the preference is to slash as
+ /// follows:
+ ///
+ /// - If any unlocking chunks exist that are scheduled to be unlocked at `slash_era +
+ /// bonding_duration` and onwards, the slash is divided proportionally between the active
+ /// ledger and the unlocking chunks.
+ /// - If no such chunks exist, then only the active balance is slashed.
+ ///
+ /// Note that the above is only a *preference*. If for any reason the active ledger and the
+ /// unlocking chunks that are more justified to be slashed are not enough, then the slashing
+ /// will continue and will consume as much of the active and unlocking chunks as needed.
 ///
- /// # Note
+ /// This will never slash more than the given amount.
If any of the chunks become dusted, the
+ /// last chunk is slashed slightly less to compensate. Returns the amount of funds actually
+ /// slashed.
+ ///
+ /// `slash_era` is the era in which the slash (which is being enacted now) actually happened.
 ///
 /// This calls `Config::OnStakerSlash::on_slash` with information as to how the slash was
 /// applied.
@@ -545,54 +558,81 @@ impl StakingLedger {
 minimum_balance: BalanceOf,
 slash_era: EraIndex,
 ) -> BalanceOf {
- use sp_staking::OnStakerSlash as _;
-
 if slash_amount.is_zero() {
 return Zero::zero()
 }
+ use sp_staking::OnStakerSlash as _;
 let mut remaining_slash = slash_amount;
 let pre_slash_total = self.total;
- let era_after_slash = slash_era + 1;
- let chunk_unlock_era_after_slash = era_after_slash + T::BondingDuration::get();
+ // for a `slash_era = x`, any chunk that is scheduled to be unlocked at era `x + 28`
+ // (assuming 28 is the bonding duration) onwards should be slashed.
+ let slashable_chunks_start = slash_era + T::BondingDuration::get();
- // Calculate the total balance of active funds and unlocking funds in the affected range.
- let (affected_balance, slash_chunks_priority): (_, Box>) = {
- if let Some(start_index) =
- self.unlocking.iter().position(|c| c.era >= chunk_unlock_era_after_slash)
+ // `Some(ratio)` if this is proportional, with `ratio`, `None` otherwise. In both cases, we
+ // slash the active balance first, and then the chunks in `slash_chunks_priority` order.
+ let (maybe_proportional, slash_chunks_priority) = {
+ if let Some(first_slashable_index) =
+ self.unlocking.iter().position(|c| c.era >= slashable_chunks_start)
 {
+ // If there exists a chunk whose era is at or after `slashable_chunks_start`, then
+ // this is a proportional slash, because we want to slash active and these chunks
+ // proportionally.
+
 // The indices of the first chunk after the slash up through the most recent chunk.
 // (The most recent chunk is at greatest from this era)
 let affected_indices = first_slashable_index..self.unlocking.len();
 let unbonding_affected_balance =
 affected_indices.clone().fold(BalanceOf::::zero(), |sum, i| {
- if let Some(chunk) = self.unlocking.get_mut(i).defensive() {
+ if let Some(chunk) = self.unlocking.get(i).defensive() {
 sum.saturating_add(chunk.value)
 } else {
 sum
 }
 });
+ let affected_balance = self.active.saturating_add(unbonding_affected_balance);
+ let ratio = Perquintill::from_rational(slash_amount, affected_balance);
 (
- self.active.saturating_add(unbonding_affected_balance),
- Box::new(affected_indices.chain((0..start_index).rev())),
+ Some(ratio),
+ affected_indices.chain((0..first_slashable_index).rev()).collect::>(),
 )
 } else {
- (self.active, Box::new((0..self.unlocking.len()).rev()))
+ // We just slash from the last chunk to the most recent one, if need be.
+ (None, (0..self.unlocking.len()).rev().collect::>())
 }
 };

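Before the real helper, a compact model of the rule it implements: the amount taken is capped by both the target's balance and the remaining slash, and a target left below the minimum balance is swept entirely. Plain integers stand in for `BalanceOf<T>` and a percentage for `Perquintill`; the bookkeeping against the ledger total is elided:

```rust
fn slash_out_of(target: &mut u128, remaining: &mut u128, ratio_pct: Option<u128>, min_balance: u128) {
    let mut taken = match ratio_pct {
        Some(pct) => *target * pct / 100, // proportional preference
        None => *remaining,               // greedy fallback
    }
    .min(*target) // never more than the target holds
    .min(*remaining); // never more than is left to slash
    *target -= taken;
    if *target < min_balance {
        // Dust rule: sweep the remainder, consuming the target entirely.
        taken += std::mem::replace(target, 0);
    }
    *remaining = remaining.saturating_sub(taken);
}
```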
 // Helper to update `target` and the ledgers total after accounting for slashing `target`.
- let ratio = Perquintill::from_rational(slash_amount, affected_balance);
+ log!(
+ debug,
+ "slashing {:?} for era {:?} out of {:?}, priority: {:?}, proportional = {:?}",
+ slash_amount,
+ slash_era,
+ self,
+ slash_chunks_priority,
+ maybe_proportional,
+ );
+
 let mut slash_out_of = |target: &mut BalanceOf, slash_remaining: &mut BalanceOf| {
- let mut slash_from_target =
- if slash_amount < affected_balance { ratio * (*target) } else { *slash_remaining }
- .min(*target);
+ let mut slash_from_target = if let Some(ratio) = maybe_proportional {
+ ratio * (*target)
+ } else {
+ *slash_remaining
+ }
+ // this is the total that the slash target has. We can't slash more than
+ // this anyhow!
+ .min(*target)
+ // this is the total amount that we would have wanted to slash
+ // non-proportionally; a proportional slash should never exceed this either!
+ .min(*slash_remaining);
 // slash out from *target exactly `slash_from_target`.
 *target = *target - slash_from_target;
 if *target < minimum_balance {
- // Slash the rest of the target if its dust
+ // Slash the rest of the target if it's dust. This might cause the last chunk to be
+ // slightly under-slashed, by at most `MaxUnlockingChunks * ED`, which is not a big
+ // deal.
 slash_from_target =
 sp_std::mem::replace(target, Zero::zero()).saturating_add(slash_from_target)
 }
@@ -606,10 +646,11 @@ impl StakingLedger {
 let mut slashed_unlocking = BTreeMap::<_, _>::new();
 for i in slash_chunks_priority {
+ if remaining_slash.is_zero() {
+ break
+ }
+
 if let Some(chunk) = self.unlocking.get_mut(i).defensive() {
- if remaining_slash.is_zero() {
- break
- }
 slash_out_of(&mut chunk.value, &mut remaining_slash);
 // write the new slashed value of this chunk to the map.
 slashed_unlocking.insert(chunk.era, chunk.value);
@@ -618,7 +659,9 @@
 }
 }
+ // clean unlocking chunks that are set to zero.
 self.unlocking.retain(|c| !c.value.is_zero());
+
 T::OnStakerSlash::on_slash(&self.stash, self.active, &slashed_unlocking);
 pre_slash_total.saturating_sub(self.total)
 }
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index 9a13a818f4b59..b76126f0c5d04 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -2081,8 +2081,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 let _ = Balances::make_free_balance_be(&11, stake);
 let _ = Balances::make_free_balance_be(&2, stake);
- // only slashes out of bonded stake are applied. without this line,
- // it is 0.
+ // only slashes out of bonded stake are applied. without this line, it is 0.
 Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap();
 // Override exposure of 11
 ErasStakers::::insert(
@@ -2104,7 +2103,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 &[Perbill::from_percent(100)],
 );
- assert_eq!(Balances::total_balance(&11), stake);
+ assert_eq!(Balances::total_balance(&11), stake - 1);
 assert_eq!(Balances::total_balance(&2), 1);
 })
}
@@ -4960,7 +4959,6 @@ fn proportional_ledger_slash_works() {
 unlocking: bounded_vec![],
 claimed_rewards: vec![],
 };
-
 assert_eq!(BondingDuration::get(), 3);
 // When we slash a ledger with no unlocking chunks
@@ -4997,7 +4995,7 @@
 ledger.total = 4 * 100;
 ledger.active = 0;
 // When the first 2 chunks don't overlap with the affected range of unlock eras.
- assert_eq!(ledger.slash(140, 0, 2), 140); + assert_eq!(ledger.slash(140, 0, 3), 140); // Then assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 30), c(7, 30)]); assert_eq!(ledger.total, 4 * 100 - 140); @@ -5039,7 +5037,7 @@ fn proportional_ledger_slash_works() { ledger.active = 500; ledger.total = 40 + 10 + 100 + 250 + 500; // 900 assert_eq!(ledger.total, 900); - // When we have a higher min balance + // When we have a higher min balance assert_eq!( ledger.slash( 900 / 2, @@ -5047,16 +5045,17 @@ fn proportional_ledger_slash_works() { * get swept */ 0 ), - 475 + 450 ); - let dust = (10 / 2) + (40 / 2); assert_eq!(ledger.active, 500 / 2); - assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 250 / 2)]); - assert_eq!(ledger.total, 900 / 2 - dust); + // the last chunk was not slashed 50% like all the rest, because some other earlier chunks got + // dusted. + assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 150)]); + assert_eq!(ledger.total, 900 / 2); assert_eq!(LedgerSlashPerEra::get().0, 500 / 2); assert_eq!( LedgerSlashPerEra::get().1, - BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 250 / 2)]) + BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 150)]) ); // Given @@ -5068,7 +5067,7 @@ fn proportional_ledger_slash_works() { ledger.slash( 500 + 10 + 250 + 100 / 2, // active + era 6 + era 7 + era 5 / 2 0, - 2 /* slash era 2+4 first, so the affected parts are era 2+4, era 3+4 and + 3 /* slash era 6 first, so the affected parts are era 6, era 7 and * ledge.active. This will cause the affected to go to zero, and then we will * start slashing older chunks */ ), @@ -5091,7 +5090,7 @@ fn proportional_ledger_slash_works() { ledger.slash( 351, // active + era 6 + era 7 + era 5 / 2 + 1 50, // min balance - everything slashed below 50 will get dusted - 2 /* slash era 2+4 first, so the affected parts are era 2+4, era 3+4 and + 3 /* slash era 3+3 first, so the affected parts are era 6, era 7 and * ledge.active. 
This will cause the affected to go to zero, and then we will * start slashing older chunks */ ), @@ -5108,9 +5107,8 @@ fn proportional_ledger_slash_works() { // Given let slash = u64::MAX as Balance * 2; - let value = slash - - (9 * 4) // The value of the other parts of ledger that will get slashed - + 1; + // The value of the other parts of ledger that will get slashed + let value = slash - (10 * 4); ledger.active = 10; ledger.unlocking = bounded_vec![c(4, 10), c(5, 10), c(6, 10), c(7, value)]; From 9dad17f77b8a56ccf0d289ae410ee1e61d9b12a1 Mon Sep 17 00:00:00 2001 From: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Date: Wed, 13 Jul 2022 13:41:20 +0100 Subject: [PATCH 033/135] Buy&Sell methods for Uniques (#11398) * Allow to set item's price * Clean the state when we transfer/burn an item or destroy a collection * Allow to buy an item * Remove redundant checks * Improve events * Cover with tests * Add comments * Apply suggestions * Fmt * Improvements for price validation * Improve validation * Update to use the new terminology * Remove multi-assets support * Chore * Weights + benchmarking * Shield against human error * Test when we pass the higher item's price * fmt fix * Chore * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_uniques --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/uniques/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Remove is_frozen check when setting the price * Try to fix benchmarking * Fix benchmarking * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_uniques --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/uniques/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Add transactional * Add 'allow deprecated' flag for transactional * Remove #[allow(deprecated)] * ".git/.scripts/bench-bot.sh" pallet dev pallet_uniques Co-authored-by: Parity Bot Co-authored-by: command-bot <> --- frame/uniques/src/benchmarking.rs | 36 ++++++ frame/uniques/src/functions.rs | 79 ++++++++++++- frame/uniques/src/lib.rs | 88 ++++++++++++++- frame/uniques/src/tests.rs | 178 +++++++++++++++++++++++++++++- frame/uniques/src/types.rs | 2 + frame/uniques/src/weights.rs | 174 +++++++++++++++++------------ 6 files changed, 485 insertions(+), 72 deletions(-) diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 14a3e2c61809a..713393048f7e9 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -408,5 +408,41 @@ benchmarks_instance_pallet! { }.into()); } + set_price { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let price = ItemPrice::::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(price), Some(delegate_lookup)) + verify { + assert_last_event::(Event::ItemPriceSet { + collection, + item, + price, + whitelisted_buyer: Some(delegate), + }.into()); + } + + buy_item { + let (collection, seller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let buyer: T::AccountId = account("buyer", 0, SEED); + let buyer_lookup = T::Lookup::unlookup(buyer.clone()); + let price = ItemPrice::::from(0u32); + let origin = SystemOrigin::Signed(seller.clone()).into(); + Uniques::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) + verify { + assert_last_event::(Event::ItemBought { + collection, + item, + price, + seller, + buyer, + }.into()); + } + impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 155fb35ef0999..9e8d3301616df 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -18,7 +18,10 @@ //! Various pieces of common functionality. use super::*; -use frame_support::{ensure, traits::Get}; +use frame_support::{ + ensure, + traits::{ExistenceRequirement, Get}, +}; use sp_runtime::{DispatchError, DispatchResult}; impl, I: 'static> Pallet { @@ -46,6 +49,7 @@ impl, I: 'static> Pallet { let origin = details.owner; details.owner = dest; Item::::insert(&collection, &item, &details); + ItemPriceOf::::remove(&collection, &item); Self::deposit_event(Event::Transferred { collection, @@ -112,6 +116,8 @@ impl, I: 'static> Pallet { } #[allow(deprecated)] ItemMetadataOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + ItemPriceOf::::remove_prefix(&collection, None); CollectionMetadataOf::::remove(&collection); #[allow(deprecated)] Attribute::::remove_prefix((&collection,), None); @@ -196,8 +202,79 @@ impl, I: 'static> Pallet { Item::::remove(&collection, &item); Account::::remove((&owner, &collection, &item)); + ItemPriceOf::::remove(&collection, &item); Self::deposit_event(Event::Burned { collection, item, owner }); Ok(()) } + + pub fn do_set_price( + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + price: Option>, + whitelisted_buyer: Option, + ) -> DispatchResult { + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner == sender, Error::::NoPermission); + + if let Some(ref price) = price { + ItemPriceOf::::insert( + &collection, + &item, + (price.clone(), whitelisted_buyer.clone()), + ); + Self::deposit_event(Event::ItemPriceSet { + collection, + item, + price: price.clone(), + whitelisted_buyer, + }); + } else { + ItemPriceOf::::remove(&collection, &item); + Self::deposit_event(Event::ItemPriceRemoved { collection, item }); + } + + Ok(()) + } + + pub fn do_buy_item( + collection: T::CollectionId, + item: T::ItemId, + buyer: T::AccountId, + bid_price: ItemPrice, + ) -> DispatchResult { + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner != buyer, Error::::NoPermission); + + let price_info = + ItemPriceOf::::get(&collection, &item).ok_or(Error::::NotForSale)?; + + ensure!(bid_price >= price_info.0, Error::::BidTooLow); + + if let Some(only_buyer) = price_info.1 { + ensure!(only_buyer == buyer, Error::::NoPermission); + } + + T::Currency::transfer( + &buyer, + &details.owner, + price_info.0, + ExistenceRequirement::KeepAlive, + )?; + + let old_owner = details.owner.clone(); + + Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?; + + Self::deposit_event(Event::ItemBought { + collection, + item, + price: price_info.0, + seller: old_owner, + buyer, + }); + + Ok(()) 
+ } } diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 5c7adeac0eff2..53f83605da90e 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -24,6 +24,7 @@ //! * [`System`](../frame_system/index.html) //! * [`Support`](../frame_support/index.html) +#![recursion_limit = "256"] // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -42,8 +43,11 @@ pub mod migration; pub mod weights; use codec::{Decode, Encode}; -use frame_support::traits::{ - tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, +use frame_support::{ + traits::{ + tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, + }, + transactional, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -245,6 +249,18 @@ pub mod pallet { OptionQuery, >; + #[pallet::storage] + /// Price of an asset instance. + pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + (ItemPrice, Option), + OptionQuery, + >; + #[pallet::storage] /// Keeps track of the number of items a collection might have. pub(super) type CollectionMaxSupply, I: 'static = ()> = @@ -341,6 +357,23 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// The price was set for the instance. + ItemPriceSet { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + whitelisted_buyer: Option, + }, + /// The price for the instance was removed. + ItemPriceRemoved { collection: T::CollectionId, item: T::ItemId }, + /// An item was bought. + ItemBought { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + seller: T::AccountId, + buyer: T::AccountId, + }, } #[pallet::error] @@ -375,6 +408,12 @@ pub mod pallet { MaxSupplyAlreadySet, /// The provided max supply is less to the amount of items a collection already has. MaxSupplyTooSmall, + /// The given item ID is unknown. + UnknownItem, + /// Item is not for sale. + NotForSale, + /// The provided bid is too low. + BidTooLow, } impl, I: 'static> Pallet { @@ -1408,5 +1447,50 @@ pub mod pallet { Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); Ok(()) } + + /// Set (or reset) the price for an item. + /// + /// Origin must be Signed and must be the owner of the asset `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item to set the price for. + /// - `price`: The price for the item. Pass `None`, to reset the price. + /// - `buyer`: Restricts the buy operation to a specific account. + /// + /// Emits `ItemPriceSet` on success if the price is not `None`. + /// Emits `ItemPriceRemoved` on success if the price is `None`. + #[pallet::weight(T::WeightInfo::set_price())] + pub fn set_price( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + price: Option>, + whitelisted_buyer: Option<::Source>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; + Self::do_set_price(collection, item, origin, price, whitelisted_buyer) + } + + /// Allows to buy an item if it's up for sale. + /// + /// Origin must be Signed and must not be the owner of the `item`. + /// + /// - `collection`: The collection of the item. 
+ /// - `item`: The item the sender wants to buy. + /// - `bid_price`: The price the sender is willing to pay. + /// + /// Emits `ItemBought` on success. + #[pallet::weight(T::WeightInfo::buy_item())] + #[transactional] + pub fn buy_item( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + bid_price: ItemPrice, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_buy_item(collection, item, origin, bid_price) + } } } diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index ef28a2143e84a..8b1d00d7ba0c7 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for Uniques pallet. use crate::{mock::*, Event, *}; -use frame_support::{assert_noop, assert_ok, traits::Currency}; +use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::Currency}; use pallet_balances::Error as BalancesError; use sp_std::prelude::*; @@ -694,3 +694,179 @@ fn max_supply_should_work() { assert!(!CollectionMaxSupply::::contains_key(collection_id)); }); } + +#[test] +fn set_price_should_work() { + new_test_ext().execute_with(|| { + let user_id = 1; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); + + assert_ok!(Uniques::set_price( + Origin::signed(user_id), + collection_id, + item_1, + Some(1), + None, + )); + + assert_ok!(Uniques::set_price( + Origin::signed(user_id), + collection_id, + item_2, + Some(2), + Some(3) + )); + + let item = ItemPriceOf::::get(collection_id, item_1).unwrap(); + assert_eq!(item.0, 1); + assert_eq!(item.1, None); + + let item = ItemPriceOf::::get(collection_id, item_2).unwrap(); + assert_eq!(item.0, 2); + assert_eq!(item.1, Some(3)); + + assert!(events().contains(&Event::::ItemPriceSet { + collection: collection_id, + item: item_1, + price: 1, + whitelisted_buyer: None, + })); + + // validate we can unset the price + assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); + assert!(events().contains(&Event::::ItemPriceRemoved { + collection: collection_id, + item: item_2 + })); + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + }); +} + +#[test] +fn buy_item_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let item_3 = 3; + let price_1 = 20; + let price_2 = 30; + let initial_balance = 100; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + Balances::make_free_balance_be(&user_3, initial_balance); + + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); + + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1)); + + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_1, + Some(price_1), + None, + )); + + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_2, + Some(price_2), + Some(user_3), + )); + + // can't buy for less + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, 
item_1, 1), + Error::::BidTooLow + ); + + // pass the higher price to validate it will still deduct correctly + assert_ok!(Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); + + // validate the new owner & balances + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_2); + assert_eq!(Balances::total_balance(&user_1), initial_balance + price_1); + assert_eq!(Balances::total_balance(&user_2), initial_balance - price_1); + + // can't buy from yourself + assert_noop!( + Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // can't buy when the item is listed for a specific buyer + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // can buy when I'm a whitelisted buyer + assert_ok!(Uniques::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); + + assert!(events().contains(&Event::::ItemBought { + collection: collection_id, + item: item_2, + price: price_2, + seller: user_1, + buyer: user_3, + })); + + // ensure we reset the buyer field + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + + // can't buy when item is not for sale + assert_noop!( + Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), + Error::::NotForSale + ); + + // ensure we can't buy an item when the collection or an item is frozen + { + assert_ok!(Uniques::set_price( + Origin::signed(user_1), + collection_id, + item_3, + Some(price_1), + None, + )); + + // freeze collection + assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); + + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + + assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id)); + + // freeze item + assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3)); + + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + } + }); +} diff --git a/frame/uniques/src/types.rs b/frame/uniques/src/types.rs index d7706fd7e3a58..98e056163d28d 100644 --- a/frame/uniques/src/types.rs +++ b/frame/uniques/src/types.rs @@ -30,6 +30,8 @@ pub(super) type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; pub(super) type ItemDetailsFor = ItemDetails<::AccountId, DepositBalanceOf>; +pub(super) type ItemPrice = + <>::Currency as Currency<::AccountId>>::Balance; #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index d885077a8dee9..7c8cb170b1b1d 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,22 +18,22 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: // target/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_uniques // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_uniques +// --chain=dev // --output=./frame/uniques/src/weights.rs // --template=./.maintain/frame-weight-template.hbs @@ -70,6 +70,8 @@ pub trait WeightInfo { fn cancel_approval() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; + fn set_price() -> Weight; + fn buy_item() -> Weight; } /// Weights for pallet_uniques using the Substrate node and recommended hardware. @@ -78,14 +80,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (27_715_000 as Weight) + (33_075_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (16_929_000 as Weight) + (19_528_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -102,12 +104,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((10_481_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 16_000 - .saturating_add((1_762_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 16_000 - .saturating_add((1_590_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) @@ -120,33 +122,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_577_000 as Weight) + (42_146_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (36_124_000 as Weight) + (42_960_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (27_225_000 as Weight) + (33_025_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
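+	// A rough reading of the regenerated formula below: redepositing e.g. `i = 10` items costs
+	// about `10 * 15_540_000` weight plus `1 + 10` DB reads and a single DB write, i.e. the
+	// per-item term dominates.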
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 14_000 - .saturating_add((12_407_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -155,26 +159,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (21_452_000 as Weight) + (25_194_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (22_155_000 as Weight) + (25_397_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (16_897_000 as Weight) + (19_278_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (16_657_000 as Weight) + (19_304_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -182,20 +186,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_057_000 as Weight) + (28_615_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (17_253_000 as Weight) + (19_943_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (20_010_000 as Weight) + (22_583_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -203,7 +207,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_159_000 as Weight) + (47_520_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -211,65 +215,81 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_598_000 as Weight) + (45_316_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (33_387_000 as Weight) + (38_391_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_208_000 as Weight) + (38_023_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 
as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (32_619_000 as Weight) + (37_398_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_028_000 as Weight) + (35_621_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (22_263_000 as Weight) + (25_856_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (22_910_000 as Weight) + (26_098_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (20_716_000 as Weight) + (24_076_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (19_380_000 as Weight) + (22_035_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + (22_534_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques ItemPriceOf (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Account (r:0 w:2) + fn buy_item() -> Weight { + (45_272_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } } // For backwards compatibility and tests @@ -277,14 +297,14 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (27_715_000 as Weight) + (33_075_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (16_929_000 as Weight) + (19_528_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -301,12 +321,12 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((10_481_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 16_000 - .saturating_add((1_762_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 16_000 - .saturating_add((1_590_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) @@ -319,33 +339,35 @@ impl WeightInfo for () { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_577_000 as Weight) + (42_146_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (36_124_000 as Weight) + (42_960_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) + // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (27_225_000 as Weight) + (33_025_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 14_000 - .saturating_add((12_407_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -354,26 +376,26 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (21_452_000 as Weight) + (25_194_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (22_155_000 as Weight) + (25_397_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (16_897_000 as Weight) + (19_278_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (16_657_000 as Weight) + (19_304_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -381,20 +403,20 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_057_000 as Weight) + (28_615_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (17_253_000 as Weight) + (19_943_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (20_010_000 as Weight) + (22_583_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -402,7 +424,7 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_159_000 as Weight) + (47_520_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -410,63 +432,79 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_598_000 as Weight) + (45_316_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (33_387_000 as Weight) + (38_391_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_208_000 as Weight) + (38_023_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as 
Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (32_619_000 as Weight) + (37_398_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_028_000 as Weight) + (35_621_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (22_263_000 as Weight) + (25_856_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (22_910_000 as Weight) + (26_098_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (20_716_000 as Weight) + (24_076_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (19_380_000 as Weight) + (22_035_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + (22_534_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques ItemPriceOf (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Account (r:0 w:2) + fn buy_item() -> Weight { + (45_272_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } } From 367dab0d4bd7fd7b6c222dd15c753169c057dd42 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 13 Jul 2022 13:49:20 +0100 Subject: [PATCH 034/135] Revamp nomination pool reward scheme (#11669) * make pool roles optional * undo lock file changes? * add migration * add the ability for pools to chill themselves * boilerplate of tests * somewhat stable, but I think I found another bug as well * Fix it all * Add more more sophisticated test + capture one more bug. * Update frame/staking/src/lib.rs * reduce the diff a little bit * add some test for the slashing bug * cleanup * fix lock file? * Fix * fmt * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/mock.rs Co-authored-by: Oliver Tale-Yazdi * Fix build * fix some fishy tests.. 
* add one last integrity check for MinCreateBond * remove bad assertion -- needs to be dealt with later * nits * fix tests and add benchmarks for chill * remove stuff * fix benchmarks * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_nomination_pools --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/nomination-pools/src/weights.rs --template=./.maintain/frame-weight-template.hbs * remove defensive * first working version * bring back all tests * ALL new tests work now * cleanup * make sure benchmarks and all work * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_nomination_pools --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/nomination-pools/src/weights.rs --template=./.maintain/frame-weight-template.hbs * round of self-review, make arithmetic safe * fix warn * add migration code * Fix doc * add precision notes * make arithmetic fallible * fix node runtime * a lot of precision tests and notes and stuff * document MaxPOintsToBalance better * :round of self-review * fmt * fix some comments * Fix proportional slashing logic * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * track poinst in migration * fix * fmt * fix migration * remove event read * Apply suggestions from code review * Update frame/staking/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Shawn Tabrizi * update * fmt * fmt * add one last test * fmt Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Shawn Tabrizi Co-authored-by: Parity Bot --- bin/node/runtime/src/lib.rs | 9 +- .../nomination-pools/benchmarking/src/mock.rs | 11 +- frame/nomination-pools/src/lib.rs | 582 +++--- frame/nomination-pools/src/migration.rs | 284 ++- frame/nomination-pools/src/mock.rs | 52 +- frame/nomination-pools/src/tests.rs | 1805 ++++++++++++----- frame/nomination-pools/src/weights.rs | 98 +- .../nomination-pools/test-staking/src/lib.rs | 5 +- .../nomination-pools/test-staking/src/mock.rs | 17 +- utils/frame/try-runtime/cli/src/lib.rs | 9 +- 10 files changed, 1950 insertions(+), 922 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2d5981339fab3..b2efcb196787d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -68,7 +68,7 @@ use sp_runtime::{ SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, Perbill, Percent, Permill, Perquintill, + ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, }; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] @@ -730,7 +730,7 @@ impl pallet_bags_list::Config for Runtime { parameter_types! 
{ pub const PostUnbondPoolsWindow: u32 = 4; pub const NominationPoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub const MinPointsToBalance: u32 = 10; + pub const MaxPointsToBalance: u8 = 10; } use sp_runtime::traits::Convert; @@ -751,6 +751,8 @@ impl pallet_nomination_pools::Config for Runtime { type WeightInfo = (); type Event = Event; type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = pallet_staking::Pallet; @@ -758,7 +760,7 @@ impl pallet_nomination_pools::Config for Runtime { type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; type PalletId = NominationPoolsPalletId; - type MinPointsToBalance = MinPointsToBalance; + type MaxPointsToBalance = MaxPointsToBalance; } parameter_types! { @@ -1675,6 +1677,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, + pallet_nomination_pools::migration::v2::MigrateToV2, >; /// MMR helper types. diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index eb884869f6d32..1c74c301e562d 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,10 @@ use frame_election_provider_support::VoteWeight; use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; -use sp_runtime::traits::{Convert, IdentityLookup}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + FixedU128, +}; type AccountId = u128; type AccountIndex = u32; @@ -144,13 +147,15 @@ impl Convert for U256ToBalance { parameter_types! { pub static PostUnbondingPoolsWindow: u32 = 10; pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub const MinPointsToBalance: u32 = 10; + pub const MaxPointsToBalance: u8 = 10; } impl pallet_nomination_pools::Config for Runtime { type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = Staking; @@ -158,7 +163,7 @@ impl pallet_nomination_pools::Config for Runtime { type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; type PalletId = PoolsPalletId; - type MinPointsToBalance = MinPointsToBalance; + type MaxPointsToBalance = MaxPointsToBalance; } impl crate::Config for Runtime {} diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index f7ccac3e35411..53e1c48a5e39e 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -17,8 +17,8 @@ //! # Nomination Pools for Staking Delegation //! -//! A pallet that allows members to delegate their stake to nominating pools. A nomination pool -//! acts as nominator and nominates validators on the members behalf. +//! A pallet that allows members to delegate their stake to nominating pools. A nomination pool acts +//! as nominator and nominates validators on the members behalf. //! //! # Index //! @@ -28,16 +28,27 @@ //! //! ## Key terms //! +//! * pool id: A unique identifier of each pool. Set to u12 //! * bonded pool: Tracks the distribution of actively staked funds. See [`BondedPool`] and -//! [`BondedPoolInner`]. Bonded pools are identified via the pools bonded account. +//! [`BondedPoolInner`]. //! * reward pool: Tracks rewards earned by actively staked funds. See [`RewardPool`] and -//! 
[`RewardPools`]. Reward pools are identified via the pools bonded account.
+//!   [`RewardPools`].
 //! * unbonding sub pools: Collection of pools at different phases of the unbonding lifecycle. See
-//!   [`SubPools`] and [`SubPoolsStorage`]. Sub pools are identified via the pools bonded account.
-//! * members: Accounts that are members of pools. See [`PoolMember`] and [`PoolMembers`]. Pool
-//!   members are identified via their account.
-//! * point: A unit of measure for a members portion of a pool's funds.
+//!   [`SubPools`] and [`SubPoolsStorage`].
+//! * members: Accounts that are members of pools. See [`PoolMember`] and [`PoolMembers`].
+//! * roles: Administrative roles of each pool, capable of controlling nomination, and the state of
+//!   the pool.
+//! * point: A unit of measure for a members portion of a pool's funds. Points initially have a
+//!   ratio of 1 (as set by `POINTS_TO_BALANCE_INIT_RATIO`) to balance, but as slashing happens,
+//!   this can change.
 //! * kick: The act of a pool administrator forcibly ejecting a member.
+//! * bonded account: A key-less account id derived from the pool id that acts as the bonded
+//!   account. This account registers itself as a nominator in the staking system, and follows
+//!   exactly the same rules and conditions as a normal staker. Its bond increases or decreases as
+//!   members join, it can `nominate` or `chill`, and might not even earn staking rewards if it is
+//!   not nominating proper validators.
+//! * reward account: A similar key-less account that is set as the `Payee` account of the bonded
+//!   account for all staking rewards.
 //!
 //! ## Usage
 //!
@@ -55,11 +66,13 @@
 //!
 //! In order to leave, a member must take two steps.
 //!
-//! First, they must call [`Call::unbond`]. The unbond other extrinsic will start the
-//! unbonding process by unbonding all of the members funds.
+//! First, they must call [`Call::unbond`]. The unbond extrinsic will start the unbonding process by
+//! unbonding all or a portion of the members funds.
 //!
-//! Second, once [`sp_staking::StakingInterface::bonding_duration`] eras have passed, the member
-//! can call [`Call::withdraw_unbonded`] to withdraw all their funds.
+//! > A member can have up to [`Config::MaxUnbonding`] distinct active unbonding requests.
+//!
+//! Second, once [`sp_staking::StakingInterface::bonding_duration`] eras have passed, the member can
+//! call [`Call::withdraw_unbonded`] to withdraw any funds that are free.
 //!
 //! For design docs see the [bonded pool](#bonded-pool) and [unbonding sub
 //! pools](#unbonding-sub-pools) sections.
@@ -67,9 +80,13 @@
 //! ### Slashes
 //!
 //! Slashes are distributed evenly across the bonded pool and the unbonding pools from slash era+1
-//! through the slash apply era. Thus, any member who either a) unbonded or b) was actively
-//! bonded in the aforementioned range of eras will be affected by the slash. A member is slashed
-//! pro-rata based on its stake relative to the total slash amount.
+//! through the slash apply era. Thus, any member who either
+//!
+//! 1. unbonded, or
+//! 2. was actively bonded
+//!
+//! in the aforementioned range of eras will be affected by the slash. A member is slashed pro-rata
+//! based on its stake relative to the total slash amount.
 //!
 //! For design docs see the [slashing](#slashing) section.
 //!
@@ -82,7 +99,9 @@
 //! To help facilitate pool administration the pool has one of three states (see [`PoolState`]):
 //!
 //! * Open: Anyone can join the pool and no members can be permissionlessly removed.
-//! * Blocked: No members can join and some admin roles can kick members.
+//! * Blocked: No members can join and some admin roles can kick members. Kicking is not instant,
+//!   and follows the same process of `unbond` and then `withdraw_unbonded`. In other words,
+//!   administrators can permissionlessly unbond other members.
 //! * Destroying: No members can join and all members can be permissionlessly removed with
 //!   [`Call::unbond`] and [`Call::withdraw_unbonded`]. Once a pool is in destroying state, it
 //!   cannot be reverted to another state.
@@ -90,12 +109,23 @@
 //! A pool has 4 administrative roles (see [`PoolRoles`]):
 //!
 //! * Depositor: creates the pool and is the initial member. They can only leave the pool once all
-//!   other members have left. Once they fully leave the pool is destroyed.
+//!   other members have left. Once they fully withdraw their funds, the pool is destroyed.
 //! * Nominator: can select which validators the pool nominates.
 //! * State-Toggler: can change the pools state and kick members if the pool is blocked.
 //! * Root: can change the nominator, state-toggler, or itself and can perform any of the actions
 //!   the nominator or state-toggler can.
 //!
+//! ### Dismantling
+//!
+//! As noted, a pool is destroyed once
+//!
+//! 1. all members have fully unbonded and withdrawn. If the pool state is set to `Destroying`,
+//!    this can happen permissionlessly.
+//! 2. the depositor itself fully unbonds and withdraws. Note that at this point, based on the
+//!    requirements of the staking system, the pool's bonded account's stake might not be able to go
+//!    below a certain threshold as a nominator. At this point, the pool should `chill` itself to
+//!    allow the depositor to leave.
+//!
 //! ## Design
 //!
 //! _Notes_: this section uses pseudo code to explain general design and does not necessarily
@@ -108,8 +138,8 @@
 //!   members that were in the pool while it was backing a validator that got slashed.
 //! * Maximize scalability in terms of member count.
 //!
-//! In order to maintain scalability, all operations are independent of the number of members. To
-//! do this, delegation specific information is stored local to the member while the pool data
+//! In order to maintain scalability, all operations are independent of the number of members. To do
+//! this, delegation specific information is stored local to the member while the pool data
 //! structures have bounded datum.
 //!
 //! ### Bonded pool
 //!
@@ -118,9 +148,9 @@
 //! unbonding. The total points of a bonded pool are always equal to the sum of points of the
 //! delegation members. A bonded pool tracks its points and reads its bonded balance.
 //!
-//! When a member joins a pool, `amount_transferred` is transferred from the members account
-//! to the bonded pools account. Then the pool calls `staking::bond_extra(amount_transferred)` and
-//! issues new points which are tracked by the member and added to the bonded pool's points.
+//! When a member joins a pool, `amount_transferred` is transferred from the members account to the
+//! bonded pools account. Then the pool calls `staking::bond_extra(amount_transferred)` and issues
+//! new points which are tracked by the member and added to the bonded pool's points.
 //!
 //! When the pool already has some balance, we want the value of a point before the transfer to
 //! equal the value of a point after the transfer. So, when a member joins a bonded pool with a
@@ -148,77 +178,13 @@
 //! ### Reward pool
 //!
 //!
When a pool is first bonded it sets up an deterministic, inaccessible account as its reward -//! destination. To track staking rewards we track how the balance of this reward account changes. -//! -//! The reward pool needs to store: -//! -//! * The pool balance at the time of the last payout: `reward_pool.balance` -//! * The total earnings ever at the time of the last payout: `reward_pool.total_earnings` -//! * The total points in the pool at the time of the last payout: `reward_pool.points` -//! -//! And the member needs to store: -//! -//! * The total payouts at the time of the last payout by that member: -//! `member.reward_pool_total_earnings` -//! -//! Before the first reward claim is initiated for a pool, all the above variables are set to zero. -//! -//! When a member initiates a claim, the following happens: -//! -//! 1) Compute the reward pool's total points and the member's virtual points in the reward pool -//! * First `current_total_earnings` is computed (`current_balance` is the free balance of the -//! reward pool at the beginning of these operations.) -//! ```text -//! current_total_earnings = -//! current_balance - reward_pool.balance + pool.total_earnings; -//! ``` -//! * Then the `current_points` is computed. Every balance unit that was added to the reward -//! pool since last time recorded means that the `pool.points` is increased by -//! `bonding_pool.total_points`. In other words, for every unit of balance that has been -//! earned by the reward pool, the reward pool points are inflated by `bonded_pool.points`. In -//! effect this allows each, single unit of balance (e.g. planck) to be divvied up pro-rata -//! among members based on points. -//! ```text -//! new_earnings = current_total_earnings - reward_pool.total_earnings; -//! current_points = reward_pool.points + bonding_pool.points * new_earnings; -//! ``` -//! * Finally, the`member_virtual_points` are computed: the product of the member's points in -//! the bonding pool and the total inflow of balance units since the last time the member -//! claimed rewards -//! ```text -//! new_earnings_since_last_claim = current_total_earnings - member.reward_pool_total_earnings; -//! member_virtual_points = member.points * new_earnings_since_last_claim; -//! ``` -//! 2) Compute the `member_payout`: -//! ```text -//! member_pool_point_ratio = member_virtual_points / current_points; -//! member_payout = current_balance * member_pool_point_ratio; -//! ``` -//! 3) Transfer `member_payout` to the member -//! 4) For the member set: -//! ```text -//! member.reward_pool_total_earnings = current_total_earnings; -//! ``` -//! 5) For the pool set: -//! ```text -//! reward_pool.points = current_points - member_virtual_points; -//! reward_pool.balance = current_balance - member_payout; -//! reward_pool.total_earnings = current_total_earnings; -//! ``` -//! -//! _Note_: One short coming of this design is that new joiners can claim rewards for the era after -//! they join even though their funds did not contribute to the pools vote weight. When a -//! member joins, it's `reward_pool_total_earnings` field is set equal to the `total_earnings` -//! of the reward pool at that point in time. At best the reward pool has the rewards up through the -//! previous era. If a member joins prior to the election snapshot it will benefit from the -//! rewards for the active era despite not contributing to the pool's vote weight. If it joins -//! after the election snapshot is taken it will benefit from the rewards of the next _2_ eras -//! 
because it's vote weight will not be counted until the election snapshot in active era + 1.
-//! Related:
-// _Note to maintainers_: In order to ensure the reward account never falls below the existential
-// deposit, at creation the reward account must be endowed with the existential deposit. All logic
-// for calculating rewards then does not see that existential deposit as part of the free balance.
-// See `RewardPool::current_balance`.
+//! destination.
+//!
+//! The reward pool is not really a pool anymore, as it no longer tracks points. Instead, it
+//! tracks a virtual value called `reward_counter`, among a few other values.
+//!
+//! See [this link](https://hackmd.io/PFGn6wI5TbCmBYoEA_f2Uw) for an in-depth explanation of the
+//! reward pool mechanism.
 //!
 //! **Relevant extrinsics:**
 //!
@@ -297,13 +263,8 @@
 //! * PoolMembers cannot vote with their staked funds because they are transferred into the pools
 //!   account. In the future this can be overcome by allowing the members to vote with their bonded
 //!   funds via vote splitting.
-//! * PoolMembers cannot quickly transfer to another pool if they do not like the nominations,
-//!   instead they must wait for the unbonding duration.
-//!
-//! # Runtime builder warnings
-//!
-//! * Watch out for overflow of [`RewardPoints`] and [`BalanceOf`] types. Consider things like the
-//!   chains total issuance, staking reward rate, and burn rate.
+//! * PoolMembers cannot quickly transfer to another pool if they do not like the nominations,
+//!   instead they must wait for the unbonding duration.

 #![cfg_attr(not(feature = "std"), no_std)]

@@ -320,7 +281,10 @@ use frame_support::{
 };
 use scale_info::TypeInfo;
 use sp_core::U256;
-use sp_runtime::traits::{AccountIdConversion, Bounded, CheckedSub, Convert, Saturating, Zero};
+use sp_runtime::{
+	traits::{AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, Zero},
+	FixedPointNumber, FixedPointOperand,
+};
 use sp_staking::{EraIndex, OnStakerSlash, StakingInterface};
 use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec};

@@ -352,8 +316,6 @@ pub use weights::WeightInfo;

 /// The balance type used by the currency system.
 pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as Config>::AccountId>>::Balance;
-/// Type used to track the points of a reward pool.
-pub type RewardPoints = U256;

 /// Type used for unique identifier of each pool.
 pub type PoolId = u32;

@@ -407,26 +369,50 @@ pub struct PoolMember<T: Config> {
 	/// The quantity of points this member has in the bonded pool or in a sub pool if
 	/// `Self::unbonding_era` is some.
 	pub points: BalanceOf<T>,
-	/// The reward pools total earnings _ever_ the last time this member claimed a payout.
-	/// Assuming no massive burning events, we expect this value to always be below total issuance.
-	/// This value lines up with the [`RewardPool::total_earnings`] after a member claims a
-	/// payout.
-	pub reward_pool_total_earnings: BalanceOf<T>,
+	/// The reward counter at the time of this member's last payout claim.
+	pub last_recorded_reward_counter: T::RewardCounter,
 	/// The eras in which this member is unbonding, mapped from era index to the number of
 	/// points scheduled to unbond in the given era.
 	pub unbonding_eras: BoundedBTreeMap<EraIndex, BalanceOf<T>, T::MaxUnbonding>,
 }

 impl<T: Config> PoolMember<T> {
-	fn total_points(&self) -> BalanceOf<T> {
-		self.active_points().saturating_add(self.unbonding_points())
+	/// The pending rewards of this member.
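+	///
+	/// Roughly speaking, this mirrors the formula implemented just below:
+	///
+	/// ```text
+	/// pending_rewards = (current_reward_counter - last_recorded_reward_counter) * active_points
+	/// ```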
+	fn pending_rewards(
+		&self,
+		current_reward_counter: T::RewardCounter,
+	) -> Result<BalanceOf<T>, Error<T>> {
+		// accuracy note: Reward counters are `FixedU128` with a base of 10^18. This value is being
+		// multiplied by a point. The worst case of a point is 10x the granularity of the balance
+		// (10x is the common configuration of `MaxPointsToBalance`).
+		//
+		// Assuming roughly the current issuance of polkadot (12,047,781,394,999,601,455, which is
+		// 1.2 * 10^9 * 10^10 = 1.2 * 10^19), the worst case point value is around 10^20.
+		//
+		// The final multiplication is:
+		//
+		// rc * 10^20 / 10^18 = rc * 100
+		//
+		// meaning that as long as reward_counter's value is less than 1/100th of its max capacity
+		// (`u128::MAX`), `checked_mul_int` won't saturate.
+		//
+		// Given the nature of the reward counter being 'pending_rewards / pool_total_point', the
+		// only (unrealistic) way that super high values can be achieved is for a pool to suddenly
+		// receive massive rewards with a very small amount of stake. In all normal pools, as the
+		// points increase, so do the rewards. Moreover, as long as rewards are not
+		// accumulated for astronomically large durations,
+		// `current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)`
+		// won't be extremely big.
+		(current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter))
+			.checked_mul_int(self.active_points())
+			.ok_or(Error::<T>::OverflowRisk)
 	}

 	/// Active balance of the member.
 	///
 	/// This is derived from the ratio of points in the pool to which the member belongs.
 	/// Might return different values based on the pool state for the same member and points.
-	pub(crate) fn active_balance(&self) -> BalanceOf<T> {
+	fn active_balance(&self) -> BalanceOf<T> {
 		if let Some(pool) = BondedPool::<T>::get(self.pool_id).defensive() {
 			pool.points_to_balance(self.points)
 		} else {
@@ -434,13 +420,18 @@
 		}
 	}

+	/// Total points of this member, both active and unbonding.
+	fn total_points(&self) -> BalanceOf<T> {
+		self.active_points().saturating_add(self.unbonding_points())
+	}
+
 	/// Active points of the member.
-	pub(crate) fn active_points(&self) -> BalanceOf<T> {
+	fn active_points(&self) -> BalanceOf<T> {
 		self.points
 	}

 	/// Inactive points of the member, waiting to be withdrawn.
-	pub(crate) fn unbonding_points(&self) -> BalanceOf<T> {
+	fn unbonding_points(&self) -> BalanceOf<T> {
 		self.unbonding_eras
 			.as_ref()
 			.iter()
@@ -673,7 +664,7 @@
 			MaxPoolMembers::<T>::get().map_or(true, |max| PoolMembers::<T>::count() < max),
 			Error::<T>::MaxPoolMembers
 		);
-		self.member_counter = self.member_counter.defensive_saturating_add(1);
+		self.member_counter = self.member_counter.checked_add(1).ok_or(Error::<T>::OverflowRisk)?;
 		Ok(())
 	}

@@ -750,19 +741,19 @@
 			// We checked for zero above
 			.div(bonded_balance);

-		let min_points_to_balance = T::MinPointsToBalance::get();
+		let max_points_to_balance = T::MaxPointsToBalance::get();

 		// Pool points can inflate relative to balance, but only if the pool is slashed.
 		// We cap the ratio of points:balance so one cannot join a pool that has been slashed
-		// by `min_points_to_balance`%, if not zero.
+		// by `max_points_to_balance`%, if not zero.
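+		// As a worked instance of the check below: with `max_points_to_balance == 10`, a pool
+		// whose 100 points are backed by only 9 units of balance (ratio floor 11) fails the
+		// `ensure!`, while one backed by 11 units (ratio floor 9) passes.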
 		ensure!(
-			points_to_balance_ratio_floor < min_points_to_balance.into(),
+			points_to_balance_ratio_floor < max_points_to_balance.into(),
 			Error::<T>::OverflowRisk
 		);
-		// while restricting the balance to `min_points_to_balance` of max total issuance,
+		// while restricting the balance to `max_points_to_balance` of max total issuance,
 		let next_bonded_balance = bonded_balance.saturating_add(new_funds);
 		ensure!(
-			next_bonded_balance < BalanceOf::<T>::max_value().div(min_points_to_balance.into()),
+			next_bonded_balance < BalanceOf::<T>::max_value().div(max_points_to_balance.into()),
 			Error::<T>::OverflowRisk
 		);

@@ -904,16 +895,6 @@
 		Ok(points_issued)
 	}

-	/// If `n` saturates at it's upper bound, mark the pool as destroying. This is useful when a
-	/// number saturating indicates the pool can no longer correctly keep track of state.
-	fn bound_check(&mut self, n: U256) -> U256 {
-		if n == U256::max_value() {
-			self.set_state(PoolState::Destroying)
-		}
-
-		n
-	}
-
 	// Set the state of `self`, and deposit an event if the state changed. State should never be set
 	// directly in order to ensure a state change event is always correctly deposited.
 	fn set_state(&mut self, state: PoolState) {
@@ -928,45 +909,104 @@
 }

 /// A reward pool.
+///
+/// A reward pool is not so much a pool anymore, since it does not contain any shares or points.
+/// Rather, it is named this way simply to fit nicely next to the bonded pool and unbonding pools
+/// in terms of terminology. In reality, a reward pool is just a container for a few pieces of
+/// pool-dependent data related to the rewards.
 #[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq))]
+#[cfg_attr(feature = "std", derive(Clone, PartialEq, DefaultNoBound))]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct RewardPool<T: Config> {
-	/// The balance of this reward pool after the last claimed payout.
-	pub balance: BalanceOf<T>,
-	/// The total earnings _ever_ of this reward pool after the last claimed payout. I.E. the sum
-	/// of all incoming balance through the pools life.
+	/// The last recorded value of the reward counter.
 	///
-	/// NOTE: We assume this will always be less than total issuance and thus can use the runtimes
-	/// `Balance` type. However in a chain with a burn rate higher than the rate this increases,
-	/// this type should be bigger than `Balance`.
-	pub total_earnings: BalanceOf<T>,
-	/// The total points of this reward pool after the last claimed payout.
-	pub points: RewardPoints,
+	/// This is updated ONLY when the points in the bonded pool change, which means `join`,
+	/// `bond_extra` and `unbond`, all of which are done through `update_records`.
+	last_recorded_reward_counter: T::RewardCounter,
+	/// The last recorded total payouts of the reward pool.
+	///
+	/// Payouts are essentially the income of the pool.
+	///
+	/// The update criteria is the same as that of `last_recorded_reward_counter`.
+	last_recorded_total_payouts: BalanceOf<T>,
+	/// Total amount that this pool has paid out so far to the members.
+	total_rewards_claimed: BalanceOf<T>,
 }

 impl<T: Config> RewardPool<T> {
-	/// Mutate the reward pool by updating the total earnings and current free balance.
-	fn update_total_earnings_and_balance(&mut self, id: PoolId) {
-		let current_balance = Self::current_balance(id);
-		// The earnings since the last time it was updated
-		let new_earnings = current_balance.saturating_sub(self.balance);
-		// The lifetime earnings of the of the reward pool
-		self.total_earnings = new_earnings.saturating_add(self.total_earnings);
-		self.balance = current_balance;
-	}
-
-	/// Get a reward pool and update its total earnings and balance
-	fn get_and_update(id: PoolId) -> Option<Self> {
-		RewardPools::<T>::get(id).map(|mut r| {
-			r.update_total_earnings_and_balance(id);
-			r
-		})
-	}
-
-	/// The current balance of the reward pool. Never access the reward pools free balance directly.
-	/// The existential deposit was not received as a reward, so the reward pool can not use it.
+	/// Getter for [`RewardPool::last_recorded_reward_counter`].
+	fn last_recorded_reward_counter(&self) -> T::RewardCounter {
+		self.last_recorded_reward_counter
+	}
+
+	/// Register some rewards that are claimed from the pool by the members.
+	fn register_claimed_reward(&mut self, reward: BalanceOf<T>) {
+		self.total_rewards_claimed = self.total_rewards_claimed.saturating_add(reward);
+	}
+
+	/// Update the recorded values of the pool.
+	fn update_records(&mut self, id: PoolId, bonded_points: BalanceOf<T>) -> Result<(), Error<T>> {
+		let balance = Self::current_balance(id);
+		self.last_recorded_reward_counter = self.current_reward_counter(id, bonded_points)?;
+		self.last_recorded_total_payouts = balance
+			.checked_add(&self.total_rewards_claimed)
+			.ok_or(Error::<T>::OverflowRisk)?;
+		Ok(())
+	}
+
+	/// Get the current reward counter, based on the given `bonded_points` being the state of the
+	/// bonded pool at this time.
+	fn current_reward_counter(
+		&self,
+		id: PoolId,
+		bonded_points: BalanceOf<T>,
+	) -> Result<T::RewardCounter, Error<T>> {
+		let balance = Self::current_balance(id);
+		let payouts_since_last_record = balance
+			.saturating_add(self.total_rewards_claimed)
+			.saturating_sub(self.last_recorded_total_payouts);
+
+		// * accuracy notes regarding the multiplication in `checked_from_rational`:
+		// `payouts_since_last_record` is a subset of the total_issuance at the very worst.
+		// `bonded_points`, similarly, in a non-slashed pool, have the same granularity as
+		// balance, and are thus also within the range of total_issuance. In the worst case
+		// scenario, for `saturating_from_rational`, we have:
+		//
+		// dot_total_issuance * 10^18 / `minJoinBond`
+		//
+		// assuming `MinJoinBond == ED`
+		//
+		// dot_total_issuance * 10^18 / 10^10 = dot_total_issuance * 10^8
+		//
+		// which, with the current numbers, is a minuscule fraction of the u128 capacity.
+		//
+		// Thus, adding two values of type reward counter should be safe for ages in a chain like
+		// Polkadot. The important note here is that `reward_pool.last_recorded_reward_counter` only
+		// ever accumulates, but its semantics imply that it is less than total_issuance, when
+		// represented as `FixedU128`, which means it is less than `total_issuance * 10^18`.
+		//
+		// * accuracy notes regarding `checked_from_rational` collapsing to zero, meaning that no
+		// reward can be claimed:
+		//
+		// The largest `bonded_points` for which the reward counter is still non-zero, with
+		// `FixedU128`, will be hit when the payout is being computed. This essentially means
+		// `payout/bonded_points` needs to be more than 1/10^18. Thus, assuming that
+		// `bonded_points` will always be less
+		// than `10 * dot_total_issuance`, if the reward_counter is the smallest possible value,
+		// the value of the reward being calculated is:
+		//
+		// x / 10^20 = 1 / 10^18
+		//
+		// x = 100
+		//
+		// which is basically 10^-8 DOTs. See `smallest_claimable_reward` for an example of this.
+		T::RewardCounter::checked_from_rational(payouts_since_last_record, bonded_points)
+			.and_then(|ref r| self.last_recorded_reward_counter.checked_add(r))
+			.ok_or(Error::<T>::OverflowRisk)
+	}
+
+	/// Current free balance of the reward pool.
+	///
+	/// This is the sum of all the rewards that are claimable by pool members.
 	fn current_balance(id: PoolId) -> BalanceOf<T> {
 		T::Currency::free_balance(&Pallet::<T>::create_reward_account(id))
 			.saturating_sub(T::Currency::minimum_balance())
@@ -1098,9 +1138,10 @@ pub mod pallet {
 	use super::*;
 	use frame_support::traits::StorageVersion;
 	use frame_system::{ensure_signed, pallet_prelude::*};
+	use sp_runtime::traits::CheckedAdd;

 	/// The current storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);

 	#[pallet::pallet]
 	#[pallet::generate_store(pub(crate) trait Store)]
@@ -1116,19 +1157,53 @@
 		type WeightInfo: weights::WeightInfo;

 		/// The nominating balance.
-		type Currency: Currency<Self::AccountId>;
+		type Currency: Currency<Self::AccountId, Balance = Self::CurrencyBalance>;
+
+		/// Sadly needed to bound it to `FixedPointOperand`.
+		// The only alternative is to sprinkle a `where BalanceOf<T>: FixedPointOperand` in roughly
+		// a million places, so we prefer doing this.
+		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
+			+ codec::FullCodec
+			+ MaybeSerializeDeserialize
+			+ sp_std::fmt::Debug
+			+ Default
+			+ FixedPointOperand
+			+ CheckedAdd
+			+ TypeInfo
+			+ MaxEncodedLen;
+
+		/// The type that is used for reward counter.
+		///
+		/// The arithmetic of the reward counter might saturate based on the size of the
+		/// `Currency::Balance`. If this happens, operations fail. Nonetheless, this type should be
+		/// chosen such that this failure almost never happens, as if it happens, the pool basically
+		/// needs to be dismantled (or all pools migrated to a larger `RewardCounter` type, which is
+		/// a PITA to do).
+		///
+		/// See the inline code docs of `PoolMember::pending_rewards` and
+		/// `RewardPool::update_records` for example analysis. A [`sp_runtime::FixedU128`] should be
+		/// fine for chains with balance types similar to that of Polkadot and Kusama, in the
+		/// absence of severe slashing (or prevented via a reasonable `MaxPointsToBalance`), for
+		/// many many years to come.
+		type RewardCounter: FixedPointNumber + MaxEncodedLen + TypeInfo + Default + codec::FullCodec;

 		/// The nomination pool's pallet id.
 		#[pallet::constant]
 		type PalletId: Get<frame_support::PalletId>;

-		/// The minimum pool points-to-balance ratio that must be maintained for it to be `open`.
+		/// The maximum pool points-to-balance ratio that an `open` pool can have.
+		///
 		/// This is important in the event slashing takes place and the pool's points-to-balance
 		/// ratio becomes disproportional.
+		///
+		/// Moreover, this relates to the `RewardCounter` type as well, as the arithmetic operations
+		/// are a function of the number of points, and by setting this value to e.g. 10, you ensure
+		/// that the total number of points in the system is at most 10 times the total_issuance of
+		/// the chain, in the absolute worst case.
+		///
 		/// For a value of 10, the threshold would be a pool points-to-balance ratio of 10:1.
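+		/// As a concrete sketch of that boundary: a pool whose 1000 points are backed by only
+		/// 100 units of balance sits exactly at the 10:1 limit.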
 		/// Such a scenario would also be the equivalent of the pool being 90% slashed.
 		#[pallet::constant]
-		type MinPointsToBalance: Get<u32>;
+		type MaxPointsToBalance: Get<u8>;

 		/// Infallible method for converting `Currency::Balance` to `U256`.
 		type BalanceToU256: Convert<BalanceOf<Self>, U256>;
@@ -1427,10 +1502,10 @@
 			let mut bonded_pool = BondedPool::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
 			bonded_pool.ok_to_join(amount)?;

-			// We just need its total earnings at this point in time, but we don't need to write it
-			// because we are not adjusting its points (all other values can calculated virtual).
-			let reward_pool = RewardPool::<T>::get_and_update(pool_id)
+			let mut reward_pool = RewardPools::<T>::get(pool_id)
 				.defensive_ok_or::<Error<T>>(DefensiveError::RewardPoolNotFound.into())?;
+			// IMPORTANT: reward pool records must be updated with the old points.
+			reward_pool.update_records(pool_id, bonded_pool.points)?;

 			bonded_pool.try_inc_members()?;
 			let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Later)?;
@@ -1440,13 +1515,9 @@
 				PoolMember::<T> {
 					pool_id,
 					points: points_issued,
-					// At best the reward pool has the rewards up through the previous era. If the
-					// member joins prior to the snapshot they will benefit from the rewards of
-					// the active era despite not contributing to the pool's vote weight. If they
-					// join after the snapshot is taken they will benefit from the rewards of the
-					// next 2 eras because their vote weight will not be counted until the
-					// snapshot in active era + 1.
-					reward_pool_total_earnings: reward_pool.total_earnings,
+					// We just updated `last_recorded_reward_counter` to the current one in
+					// `update_records`.
+					last_recorded_reward_counter: reward_pool.last_recorded_reward_counter(),
 					unbonding_eras: Default::default(),
 				},
 			);
@@ -1457,7 +1528,9 @@
 				bonded: amount,
 				joined: true,
 			});
+
 			bonded_pool.put();
+			RewardPools::<T>::insert(pool_id, reward_pool);

 			Ok(())
 		}
@@ -1466,6 +1539,8 @@
 		///
 		/// Additional funds can come from either the free balance of the account, or from the
 		/// accumulated rewards, see [`BondExtra`].
+		///
+		/// Bonding extra funds implies an automatic payout of all pending rewards as well.
 		// NOTE: this transaction is implemented with the sole purpose of readability and
 		// correctness, not optimization. We read/write several storage items multiple times instead
 		// of just once, in the spirit of reusing code.
@@ -1478,19 +1553,23 @@
 			let who = ensure_signed(origin)?;
 			let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?;

+			// Payout-related stuff: we must claim the payouts, and update recorded payout data,
+			// before updating the bonded pool points, similar to the `join` transaction.
+			reward_pool.update_records(bonded_pool.id, bonded_pool.points)?;
+			// TODO: optimize this to not touch the free balance of `who` at all in benchmarks.
+			// Currently, bonding rewards is like a batch. In the same PR, also make this function
+			// take a boolean argument that makes it either 100% pure (no storage update), or make
+			// it also emit an event and do the transfer. #11671
+			let claimed =
+				Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?;
+
 			let (points_issued, bonded) = match extra {
 				BondExtra::FreeBalance(amount) =>
 					(bonded_pool.try_bond_funds(&who, amount, BondType::Later)?, amount),
-				BondExtra::Rewards => {
-					let claimed = Self::do_reward_payout(
-						&who,
-						&mut member,
-						&mut bonded_pool,
-						&mut reward_pool,
-					)?;
-					(bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed)
-				},
+				BondExtra::Rewards =>
+					(bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed),
 			};
+
 			bonded_pool.ok_to_be_open(bonded)?;
 			member.points = member.points.saturating_add(points_issued);
@@ -1558,21 +1637,17 @@
 			member_account: T::AccountId,
 			#[pallet::compact] unbonding_points: BalanceOf<T>,
 		) -> DispatchResult {
-			let caller = ensure_signed(origin)?;
+			let who = ensure_signed(origin)?;
 			let (mut member, mut bonded_pool, mut reward_pool) =
 				Self::get_member_with_pools(&member_account)?;
-			bonded_pool.ok_to_unbond_with(&caller, &member_account, &member, unbonding_points)?;
+			bonded_pool.ok_to_unbond_with(&who, &member_account, &member, unbonding_points)?;

 			// Claim the payout prior to unbonding. Once the user is unbonding their points no
 			// longer exist in the bonded pool and thus they can no longer claim their payouts. It
 			// is not strictly necessary to claim the rewards, but we do it here for UX.
-			Self::do_reward_payout(
-				&member_account,
-				&mut member,
-				&mut bonded_pool,
-				&mut reward_pool,
-			)?;
+			let _ = reward_pool.update_records(bonded_pool.id, bonded_pool.points)?;
+			let _ = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?;

 			let current_era = T::StakingInterface::current_era();
 			let unbond_era = T::StakingInterface::bonding_duration().saturating_add(current_era);
@@ -1846,23 +1921,25 @@
 				PoolMember::<T> {
 					pool_id,
 					points,
-					reward_pool_total_earnings: Zero::zero(),
+					last_recorded_reward_counter: Zero::zero(),
 					unbonding_eras: Default::default(),
 				},
 			);
 			RewardPools::<T>::insert(
 				pool_id,
 				RewardPool::<T> {
-					balance: Zero::zero(),
-					points: U256::zero(),
-					total_earnings: Zero::zero(),
+					last_recorded_reward_counter: Zero::zero(),
+					last_recorded_total_payouts: Zero::zero(),
+					total_rewards_claimed: Zero::zero(),
 				},
 			);
 			ReversePoolIdLookup::<T>::insert(bonded_pool.bonded_account(), pool_id);
+
 			Self::deposit_event(Event::<T>::Created {
 				depositor: who.clone(),
 				pool_id: pool_id.clone(),
 			});
+
 			Self::deposit_event(Event::<T>::Bonded {
 				member: who,
 				pool_id,
@@ -1896,8 +1973,14 @@
 		/// Set a new state for the pool.
 		///
-		/// The dispatch origin of this call must be signed by the state toggler, or the root role
-		/// of the pool.
+		/// If a pool is already in the `Destroying` state, then under no condition can its state
+		/// change again.
+		///
+		/// The dispatch origin of this call must be either:
+		///
+		/// 1. signed by the state toggler, or the root role of the pool,
+		/// 2. if the pool conditions to be open are NOT met (as described by `ok_to_be_open`), in
+		///    which case the state of the pool can be permissionlessly changed to `Destroying`.
#[pallet::weight(T::WeightInfo::set_state())]
#[transactional]
pub fn set_state(
@@ -2065,14 +2148,9 @@ pub mod pallet {
impl Hooks> for Pallet {
fn integrity_test() {
assert!(
- T::MinPointsToBalance::get() > 0,
+ T::MaxPointsToBalance::get() > 0,
"Maximum points to balance ratio must be greater than 0"
);
- assert!(
- sp_std::mem::size_of::() >=
- 2 * sp_std::mem::size_of::>(),
- "bit-length of the reward points must be at least twice as much as balance"
- );
assert!(
T::StakingInterface::bonding_duration() < TotalUnbondingPools::::get(),
"There must be more unbonding pools than the bonding duration /
@@ -2119,7 +2197,7 @@ impl Pallet {
ExistenceRequirement::AllowDeath,
);
- // TODO: this is purely defensive.
+ // NOTE: this is purely defensive.
T::Currency::make_free_balance_be(&reward_account, Zero::zero());
T::Currency::make_free_balance_be(&bonded_pool.bonded_account(), Zero::zero());
@@ -2212,75 +2290,6 @@ impl Pallet {
.div(current_points)
}

- /// Calculate the rewards for `member`.
- ///
- /// Returns the payout amount.
- fn calculate_member_payout(
- member: &mut PoolMember,
- bonded_pool: &mut BondedPool,
- reward_pool: &mut RewardPool,
- ) -> Result, DispatchError> {
- let u256 = |x| T::BalanceToU256::convert(x);
- let balance = |x| T::U256ToBalance::convert(x);
-
- let last_total_earnings = reward_pool.total_earnings;
- reward_pool.update_total_earnings_and_balance(bonded_pool.id);
-
- // Notice there is an edge case where total_earnings have not increased and this is zero
- let new_earnings = u256(reward_pool.total_earnings.saturating_sub(last_total_earnings));
-
- // The new points that will be added to the pool. For every unit of balance that has been
- // earned by the reward pool, we inflate the reward pool points by `bonded_pool.points`. In
- // effect this allows each, single unit of balance (e.g. plank) to be divvied up pro rata
- // among members based on points.
- let new_points = u256(bonded_pool.points).saturating_mul(new_earnings);
-
- // The points of the reward pool after taking into account the new earnings. Notice that
- // this only stays even or increases over time except for when we subtract member virtual
- // shares.
- let current_points = bonded_pool.bound_check(reward_pool.points.saturating_add(new_points));
-
- // The rewards pool's earnings since the last time this member claimed a payout.
- let new_earnings_since_last_claim =
- reward_pool.total_earnings.saturating_sub(member.reward_pool_total_earnings);
-
- // The points of the reward pool that belong to the member.
- let member_virtual_points =
- // The members portion of the reward pool
- u256(member.active_points())
- // times the amount the pool has earned since the member last claimed.
- .saturating_mul(u256(new_earnings_since_last_claim));
-
- let member_payout = if member_virtual_points.is_zero() ||
- current_points.is_zero() ||
- reward_pool.balance.is_zero()
- {
- Zero::zero()
- } else {
- // Equivalent to `(member_virtual_points / current_points) * reward_pool.balance`
- let numerator = {
- let numerator = member_virtual_points.saturating_mul(u256(reward_pool.balance));
- bonded_pool.bound_check(numerator)
- };
- balance(
- numerator
- // We check for zero above
- .div(current_points),
- )
- };
-
- // Record updates
- if reward_pool.total_earnings == BalanceOf::::max_value() {
- bonded_pool.set_state(PoolState::Destroying);
- };
-
- member.reward_pool_total_earnings = reward_pool.total_earnings;
- reward_pool.points = current_points.saturating_sub(member_virtual_points);
- reward_pool.balance = reward_pool.balance.saturating_sub(member_payout);
-
- Ok(member_payout)
- }
-
/// If the member has some rewards, transfer a payout from the reward pool to the member.
// Emits events and potentially modifies pool state if any arithmetic saturates, but does
// not persist any of the mutable inputs to storage.
@@ -2291,38 +2300,37 @@ impl Pallet {
reward_pool: &mut RewardPool,
) -> Result, DispatchError> {
debug_assert_eq!(member.pool_id, bonded_pool.id);
+ // a member who has no skin in the game anymore cannot claim any rewards.
ensure!(!member.active_points().is_zero(), Error::::FullyUnbonding);

- let was_destroying = bonded_pool.is_destroying();
- let member_payout = Self::calculate_member_payout(member, bonded_pool, reward_pool)?;
+ let current_reward_counter =
+ reward_pool.current_reward_counter(bonded_pool.id, bonded_pool.points)?;
+ let pending_rewards = member.pending_rewards(current_reward_counter)?;

- if member_payout.is_zero() {
- return Ok(member_payout)
+ if pending_rewards.is_zero() {
+ return Ok(pending_rewards)
}

+ // IFF the reward is non-zero, alter the member and reward pool info.
+ member.last_recorded_reward_counter = current_reward_counter;
+ reward_pool.register_claimed_reward(pending_rewards);
+
// Transfer payout to the member.
T::Currency::transfer(
&bonded_pool.reward_account(),
&member_account,
- member_payout,
+ pending_rewards,
ExistenceRequirement::AllowDeath,
)?;

Self::deposit_event(Event::::PaidOut {
member: member_account.clone(),
pool_id: member.pool_id,
- payout: member_payout,
+ payout: pending_rewards,
});

- if bonded_pool.is_destroying() && !was_destroying {
- Self::deposit_event(Event::::StateChanged {
- pool_id: member.pool_id,
- new_state: PoolState::Destroying,
- });
- }
-
- Ok(member_payout)
+ Ok(pending_rewards)
}

/// Ensure the correctness of the state of this pallet.
diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs
index e23a35fe85602..f2abfe29dfbf7 100644
--- a/frame/nomination-pools/src/migration.rs
+++ b/frame/nomination-pools/src/migration.rs
@@ -16,11 +16,12 @@
// limitations under the License.

use super::*;
+use crate::log;
+use frame_support::traits::OnRuntimeUpgrade;
+use sp_std::collections::btree_map::BTreeMap;

pub mod v1 {
use super::*;
- use crate::log;
- use frame_support::traits::OnRuntimeUpgrade;

#[derive(Decode)]
pub struct OldPoolRoles {
@@ -103,3 +104,282 @@ pub mod v1 {
}
}
}
+
+pub mod v2 {
+ use super::*;
+ use sp_runtime::Perbill;
+
+ #[test]
+ fn migration_assumption_is_correct() {
+ // this migration cleans all the reward accounts to contain exactly ED, and all members
+ // having no claimable rewards.
In this state, all fields of the `RewardPool` and
+ // `member.last_recorded_reward_counter` are zero.
+ use crate::mock::*;
+ ExtBuilder::default().build_and_execute(|| {
+ let join = |x| {
+ Balances::make_free_balance_be(&x, Balances::minimum_balance() + 10);
+ frame_support::assert_ok!(Pools::join(Origin::signed(x), 10, 1));
+ };
+
+ assert_eq!(BondedPool::::get(1).unwrap().points, 10);
+ assert_eq!(
+ RewardPools::::get(1).unwrap(),
+ RewardPool { ..Default::default() }
+ );
+ assert_eq!(
+ PoolMembers::::get(10).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+
+ join(20);
+ assert_eq!(BondedPool::::get(1).unwrap().points, 20);
+ assert_eq!(
+ RewardPools::::get(1).unwrap(),
+ RewardPool { ..Default::default() }
+ );
+ assert_eq!(
+ PoolMembers::::get(10).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+ assert_eq!(
+ PoolMembers::::get(20).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+
+ join(30);
+ assert_eq!(BondedPool::::get(1).unwrap().points, 30);
+ assert_eq!(
+ RewardPools::::get(1).unwrap(),
+ RewardPool { ..Default::default() }
+ );
+ assert_eq!(
+ PoolMembers::::get(10).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+ assert_eq!(
+ PoolMembers::::get(20).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+ assert_eq!(
+ PoolMembers::::get(30).unwrap().last_recorded_reward_counter,
+ Zero::zero()
+ );
+ });
+ }
+
+ #[derive(Decode)]
+ pub struct OldRewardPool {
+ pub balance: B,
+ pub total_earnings: B,
+ pub points: U256,
+ }
+
+ #[derive(Decode)]
+ pub struct OldPoolMember {
+ pub pool_id: PoolId,
+ pub points: BalanceOf,
+ pub reward_pool_total_earnings: BalanceOf,
+ pub unbonding_eras: BoundedBTreeMap, T::MaxUnbonding>,
+ }
+
+ /// Migrate the pool reward scheme to the new version, as per
+ /// .
+ pub struct MigrateToV2(sp_std::marker::PhantomData);
+ impl MigrateToV2 {
+ fn run(current: StorageVersion) -> Weight {
+ let mut reward_pools_translated = 0u64;
+ let mut members_translated = 0u64;
+ // just for logging.
+ let mut total_value_locked = BalanceOf::::zero();
+ let mut total_points_locked = BalanceOf::::zero();
+
+ // store each member of the pool, with their active points. In the process, migrate
+ // their data as well.
+ let mut temp_members = BTreeMap::)>>::new();
+ PoolMembers::::translate::, _>(|key, old_member| {
+ let id = old_member.pool_id;
+ temp_members.entry(id).or_default().push((key, old_member.points));
+
+ total_points_locked += old_member.points;
+ members_translated += 1;
+ Some(PoolMember:: {
+ last_recorded_reward_counter: Zero::zero(),
+ pool_id: old_member.pool_id,
+ points: old_member.points,
+ unbonding_eras: old_member.unbonding_eras,
+ })
+ });
+
+ // translate all reward pools. In the process, do the last payout as well.
+ RewardPools::::translate::>, _>(
+ |id, _old_reward_pool| {
+ // each pool should have at least one member.
+ let members = match temp_members.get(&id) {
+ Some(x) => x,
+ None => {
+ log!(error, "pool {} has no member! deleting it..", id);
+ return None
+ },
+ };
+ let bonded_pool = match BondedPools::::get(id) {
+ Some(x) => x,
+ None => {
+ log!(error, "pool {} has no bonded pool!
deleting it..", id); + return None + }, + }; + + let accumulated_reward = RewardPool::::current_balance(id); + let reward_account = Pallet::::create_reward_account(id); + let mut sum_paid_out = BalanceOf::::zero(); + + members + .into_iter() + .filter_map(|(who, points)| { + let bonded_pool = match BondedPool::::get(id) { + Some(x) => x, + None => { + log!(error, "pool {} for member {:?} does not exist!", id, who); + return None + }, + }; + + total_value_locked += bonded_pool.points_to_balance(points.clone()); + let portion = Perbill::from_rational(*points, bonded_pool.points); + let last_claim = portion * accumulated_reward; + + log!( + debug, + "{:?} has {:?} ({:?}) of pool {} with total reward of {:?}", + who, + portion, + last_claim, + id, + accumulated_reward + ); + + if last_claim.is_zero() { + None + } else { + Some((who, last_claim)) + } + }) + .for_each(|(who, last_claim)| { + let outcome = T::Currency::transfer( + &reward_account, + &who, + last_claim, + ExistenceRequirement::KeepAlive, + ); + + if let Err(reason) = outcome { + log!(warn, "last reward claim failed due to {:?}", reason,); + } else { + sum_paid_out = sum_paid_out.saturating_add(last_claim); + } + + Pallet::::deposit_event(Event::::PaidOut { + member: who.clone(), + pool_id: id, + payout: last_claim, + }); + }); + + // this can only be because of rounding down, or because the person we + // wanted to pay their reward to could not accept it (dust). + let leftover = accumulated_reward.saturating_sub(sum_paid_out); + if !leftover.is_zero() { + // pay it all to depositor. + let o = T::Currency::transfer( + &reward_account, + &bonded_pool.roles.depositor, + leftover, + ExistenceRequirement::KeepAlive, + ); + log!(warn, "paying {:?} leftover to the depositor: {:?}", leftover, o); + } + + // finally, migrate the reward pool. + reward_pools_translated += 1; + + Some(RewardPool { + last_recorded_reward_counter: Zero::zero(), + last_recorded_total_payouts: Zero::zero(), + total_rewards_claimed: Zero::zero(), + }) + }, + ); + + log!( + info, + "Upgraded {} members, {} reward pools, TVL {:?} TPL {:?}, storage to version {:?}", + members_translated, + reward_pools_translated, + total_value_locked, + total_points_locked, + current + ); + current.put::>(); + T::DbWeight::get().reads_writes(members_translated + 1, reward_pools_translated + 1) + } + } + + impl OnRuntimeUpgrade for MigrateToV2 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + log!( + info, + "Running migration with current storage version {:?} / onchain {:?}", + current, + onchain + ); + + if current == 2 && onchain == 1 { + Self::run(current) + } else { + log!(info, "MigrateToV2 did not executed. This probably should be removed"); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + // all reward accounts must have more than ED. + RewardPools::::iter().for_each(|(id, _)| { + assert!( + T::Currency::free_balance(&Pallet::::create_reward_account(id)) >= + T::Currency::minimum_balance() + ) + }); + + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + // new version must be set. + assert_eq!(Pallet::::on_chain_storage_version(), 2); + + // no reward or bonded pool has been skipped. 
+ assert_eq!(RewardPools::::iter().count() as u32, RewardPools::::count()); + assert_eq!(BondedPools::::iter().count() as u32, BondedPools::::count()); + + // all reward pools must have exactly ED in them. This means no reward can be claimed, + // and that setting reward counters all over the board to zero will work henceforth. + RewardPools::::iter().for_each(|(id, _)| { + assert_eq!( + RewardPool::::current_balance(id), + Zero::zero(), + "reward pool({}) balance is {:?}", + id, + RewardPool::::current_balance(id) + ); + }); + + log!(info, "post upgrade hook for MigrateToV2 executed."); + Ok(()) + } + } +} diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 7d71efc3be04d..042a0b666efb1 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -2,9 +2,14 @@ use super::*; use crate::{self as pools}; use frame_support::{assert_ok, parameter_types, PalletId}; use frame_system::RawOrigin; +use sp_runtime::FixedU128; pub type AccountId = u128; pub type Balance = u128; +pub type RewardCounter = FixedU128; +// This sneaky little hack allows us to write code exactly as we would do in the pallet in the tests +// as well, e.g. `StorageItem::::get()`. +pub type T = Runtime; // Ext builder creates a pool with id 1. pub fn default_bonded_account() -> AccountId { @@ -23,6 +28,7 @@ parameter_types! { pub storage UnbondingBalanceMap: BTreeMap = Default::default(); #[derive(Clone, PartialEq)] pub static MaxUnbonding: u32 = 8; + pub static StakingMinBond: Balance = 10; pub storage Nominations: Option> = None; } @@ -40,7 +46,7 @@ impl sp_staking::StakingInterface for StakingMock { type AccountId = AccountId; fn minimum_bond() -> Self::Balance { - 10 + StakingMinBond::get() } fn current_era() -> EraIndex { @@ -184,6 +190,8 @@ impl pools::Config for Runtime { type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = StakingMock; @@ -191,7 +199,7 @@ impl pools::Config for Runtime { type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; type MaxUnbonding = MaxUnbonding; - type MinPointsToBalance = frame_support::traits::ConstU32<10>; + type MaxPointsToBalance = frame_support::traits::ConstU8<10>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -208,9 +216,16 @@ frame_support::construct_runtime!( } ); -#[derive(Default)] pub struct ExtBuilder { members: Vec<(AccountId, Balance)>, + max_members: Option, + max_members_per_pool: Option, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { members: Default::default(), max_members: Some(4), max_members_per_pool: Some(3) } + } } impl ExtBuilder { @@ -225,11 +240,26 @@ impl ExtBuilder { self } + pub(crate) fn min_bond(self, min: Balance) -> Self { + StakingMinBond::set(min); + self + } + pub(crate) fn with_check(self, level: u8) -> Self { CheckLevel::set(level); self } + pub(crate) fn max_members(mut self, max: Option) -> Self { + self.max_members = max; + self + } + + pub(crate) fn max_members_per_pool(mut self, max: Option) -> Self { + self.max_members_per_pool = max; + self + } + pub(crate) fn build(self) -> sp_io::TestExternalities { let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -238,8 +268,8 @@ impl ExtBuilder { min_join_bond: 2, min_create_bond: 2, max_pools: Some(2), - max_members_per_pool: Some(3), - max_members: 
Some(4), + max_members_per_pool: self.max_members_per_pool, + max_members: self.max_members, } .assimilate_storage(&mut storage); @@ -281,8 +311,8 @@ pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) -> Result<(), } parameter_types! { - static PoolsEvents: usize = 0; - static BalancesEvents: usize = 0; + storage PoolsEvents: u32 = 0; + storage BalancesEvents: u32 = 0; } /// All events of this pallet. @@ -293,8 +323,8 @@ pub(crate) fn pool_events_since_last_call() -> Vec> { .filter_map(|e| if let Event::Pools(inner) = e { Some(inner) } else { None }) .collect::>(); let already_seen = PoolsEvents::get(); - PoolsEvents::set(events.len()); - events.into_iter().skip(already_seen).collect() + PoolsEvents::set(&(events.len() as u32)); + events.into_iter().skip(already_seen as usize).collect() } /// All events of the `Balances` pallet. @@ -305,8 +335,8 @@ pub(crate) fn balances_events_since_last_call() -> Vec>(); let already_seen = BalancesEvents::get(); - BalancesEvents::set(events.len()); - events.into_iter().skip(already_seen).collect() + BalancesEvents::set(&(events.len() as u32)); + events.into_iter().skip(already_seen as usize).collect() } /// Same as `fully_unbond`, in permissioned setting. diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 97104423c5910..9989893d86462 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -17,10 +17,7 @@ use super::*; use crate::{mock::*, Event}; -use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_btree_map, - storage::{with_transaction, TransactionOutcome}, -}; +use frame_support::{assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_btree_map}; use pallet_balances::Event as BEvent; use sp_runtime::traits::Dispatchable; @@ -66,7 +63,11 @@ fn test_setup_works() { ); assert_eq!( RewardPools::::get(last_pool).unwrap(), - RewardPool:: { balance: 0, points: 0u32.into(), total_earnings: 0 } + RewardPool:: { + last_recorded_reward_counter: Zero::zero(), + last_recorded_total_payouts: 0, + total_rewards_claimed: 0 + } ); assert_eq!( PoolMembers::::get(10).unwrap(), @@ -206,34 +207,34 @@ mod bonded_pool { }, }; - let min_points_to_balance: u128 = - <::MinPointsToBalance as Get>::get().into(); + let max_points_to_balance: u128 = + <::MaxPointsToBalance as Get>::get().into(); // Simulate a 100% slashed pool StakingMock::set_bonded_balance(pool.bonded_account(), 0); assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); - // Simulate a slashed pool at `MinPointsToBalance` + 1 slashed pool + // Simulate a slashed pool at `MaxPointsToBalance` + 1 slashed pool StakingMock::set_bonded_balance( pool.bonded_account(), - min_points_to_balance.saturating_add(1).into(), + max_points_to_balance.saturating_add(1).into(), ); assert_ok!(pool.ok_to_join(0)); - // Simulate a slashed pool at `MinPointsToBalance` - StakingMock::set_bonded_balance(pool.bonded_account(), min_points_to_balance); + // Simulate a slashed pool at `MaxPointsToBalance` + StakingMock::set_bonded_balance(pool.bonded_account(), max_points_to_balance); assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); StakingMock::set_bonded_balance( pool.bonded_account(), - Balance::MAX / min_points_to_balance, + Balance::MAX / max_points_to_balance, ); // New bonded balance would be over threshold of Balance type assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); // and a sanity check StakingMock::set_bonded_balance( pool.bonded_account(), - Balance::MAX / 
min_points_to_balance - 1, + Balance::MAX / max_points_to_balance - 1, ); assert_ok!(pool.ok_to_join(0)); }); @@ -515,44 +516,37 @@ mod join { .put(); // and reward pool - RewardPools::::insert( - 123, - RewardPool:: { - balance: Zero::zero(), - total_earnings: Zero::zero(), - points: U256::from(0u32), - }, - ); + RewardPools::::insert(123, RewardPool:: { ..Default::default() }); - // Force the points:balance ratio to `MinPointsToBalance` (100/10) - let min_points_to_balance: u128 = - <::MinPointsToBalance as Get>::get().into(); + // Force the points:balance ratio to `MaxPointsToBalance` (100/10) + let max_points_to_balance: u128 = + <::MaxPointsToBalance as Get>::get().into(); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), - min_points_to_balance, + max_points_to_balance, ); assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::OverflowRisk); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), - Balance::MAX / min_points_to_balance, + Balance::MAX / max_points_to_balance, ); - // Balance needs to be gt Balance::MAX / `MinPointsToBalance` + // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` assert_noop!(Pools::join(Origin::signed(11), 5, 123), Error::::OverflowRisk); - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), min_points_to_balance); + StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); // Cannot join a pool that isn't open unsafe_set_state(123, PoolState::Blocked).unwrap(); assert_noop!( - Pools::join(Origin::signed(11), min_points_to_balance, 123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); unsafe_set_state(123, PoolState::Destroying).unwrap(); assert_noop!( - Pools::join(Origin::signed(11), min_points_to_balance, 123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); @@ -649,17 +643,34 @@ mod join { mod claim_payout { use super::*; - fn del(points: Balance, reward_pool_total_earnings: Balance) -> PoolMember { + fn del(points: Balance, last_recorded_reward_counter: u128) -> PoolMember { PoolMember { pool_id: 1, points, - reward_pool_total_earnings, + last_recorded_reward_counter: last_recorded_reward_counter.into(), unbonding_eras: Default::default(), } } - fn rew(balance: Balance, points: u32, total_earnings: Balance) -> RewardPool { - RewardPool { balance, points: points.into(), total_earnings } + fn del_float(points: Balance, last_recorded_reward_counter: f64) -> PoolMember { + PoolMember { + pool_id: 1, + points, + last_recorded_reward_counter: RewardCounter::from_float(last_recorded_reward_counter), + unbonding_eras: Default::default(), + } + } + + fn rew( + last_recorded_reward_counter: u128, + last_recorded_total_payouts: Balance, + total_rewards_claimed: Balance, + ) -> RewardPool { + RewardPool { + last_recorded_reward_counter: last_recorded_reward_counter.into(), + last_recorded_total_payouts, + total_rewards_claimed, + } } #[test] @@ -672,8 +683,12 @@ mod claim_payout { Balances::make_free_balance_be(&40, 0); Balances::make_free_balance_be(&50, 0); let ed = Balances::minimum_balance(); + // and the reward pool has earned 100 in rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 100); + assert_eq!(Balances::free_balance(default_reward_account()), ed); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); + + let _ = pool_events_since_last_call(); // When assert_ok!(Pools::claim_payout(Origin::signed(10))); @@ -681,22 +696,13 @@ mod 
claim_payout {
// Then
assert_eq!(
pool_events_since_last_call(),
- vec![
- Event::Created { depositor: 10, pool_id: 1 },
- Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
- Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true },
- Event::Bonded { member: 50, pool_id: 1, bonded: 50, joined: true },
- Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
- ]
- );
-
- // Expect a payout of 10: (10 del virtual points / 100 pool points) * 100 pool
- // balance
- assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 100));
- assert_eq!(
- RewardPools::::get(&1).unwrap(),
- rew(90, 100 * 100 - 100 * 10, 100)
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 },]
);
+ // last recorded reward counter at the time of this member's payout is 1
+ assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 1));
+ // pool's 'last_recorded_reward_counter' and 'last_recorded_total_payouts' don't
+ // really change unless someone bonds/unbonds.
+ assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 10));
assert_eq!(Balances::free_balance(&10), 10);
assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90);
@@ -708,13 +714,8 @@ mod claim_payout {
pool_events_since_last_call(),
vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }]
);
-
- // Expect payout 40: (400 del virtual points / 900 pool points) * 90 pool balance
- assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 100));
- assert_eq!(
- RewardPools::::get(&1).unwrap(),
- rew(50, 9_000 - 100 * 40, 100)
- );
+ assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 1));
+ assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 50));
assert_eq!(Balances::free_balance(&40), 40);
assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50);
@@ -726,15 +727,13 @@ mod claim_payout {
pool_events_since_last_call(),
vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
);
-
- // Expect payout 50: (50 del virtual points / 50 pool points) * 50 pool balance
- assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 100));
+ assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 1));
assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 100));
assert_eq!(Balances::free_balance(&50), 50);
assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0);

// Given the reward pool has some new rewards
- Balances::make_free_balance_be(&default_reward_account(), ed + 50);
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50));

// When
assert_ok!(Pools::claim_payout(Origin::signed(10)));
@@ -744,10 +743,8 @@ mod claim_payout {
pool_events_since_last_call(),
vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
);
-
- // Expect payout 5: (500 del virtual points / 5,000 pool points) * 50 pool balance
- assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 150));
- assert_eq!(RewardPools::::get(&1).unwrap(), rew(45, 5_000 - 50 * 10, 150));
+ assert_eq!(PoolMembers::::get(10).unwrap(), del_float(10, 1.5));
+ assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 105));
assert_eq!(Balances::free_balance(&10), 10 + 5);
assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45);
@@ -759,11 +756,8 @@ mod claim_payout {
pool_events_since_last_call(),
vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }]
);
-
- // Expect payout 20: (2,000 del virtual points / 4,500 pool points) * 45 pool
- // balance
- assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 150));
- assert_eq!(RewardPools::::get(&1).unwrap(), rew(25, 4_500 - 50 * 40, 150));
+
assert_eq!(PoolMembers::::get(40).unwrap(), del_float(40, 1.5)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 125)); assert_eq!(Balances::free_balance(&40), 40 + 20); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25); @@ -779,22 +773,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }] ); - - // We expect a payout of 50: (5,000 del virtual points / 7,5000 pool points) * 75 - // pool balance - assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 200)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew( - 25, - // old pool points + points from new earnings - del points. - // - // points from new earnings = new earnings(50) * bonded_pool.points(100) - // del points = member.points(50) * new_earnings_since_last_claim (100) - (2_500 + 50 * 100) - 50 * 100, - 200, - ) - ); + assert_eq!(PoolMembers::::get(50).unwrap(), del_float(50, 2.0)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 175)); assert_eq!(Balances::free_balance(&50), 50 + 50); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25); @@ -806,10 +786,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }] ); - - // We expect a payout of 5 - assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 200)); - assert_eq!(RewardPools::::get(&1).unwrap(), rew(20, 2_500 - 10 * 50, 200)); + assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 2)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 180)); assert_eq!(Balances::free_balance(&10), 15 + 5); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 20); @@ -827,19 +805,8 @@ mod claim_payout { ); // We expect a payout of 40 - assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 600)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew( - 380, - // old pool points + points from new earnings - del points - // - // points from new earnings = new earnings(400) * bonded_pool.points(100) - // del points = member.points(10) * new_earnings_since_last_claim(400) - (2_000 + 400 * 100) - 10 * 400, - 600 - ) - ); + assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 6)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 220)); assert_eq!(Balances::free_balance(&10), 20 + 40); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 380); @@ -855,14 +822,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 2 }] ); - - // Expect a payout of 2: (200 del virtual points / 38,000 pool points) * 400 pool - // balance - assert_eq!(PoolMembers::::get(10).unwrap(), del(10, 620)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew(398, (38_000 + 20 * 100) - 10 * 20, 620) - ); + assert_eq!(PoolMembers::::get(10).unwrap(), del_float(10, 6.2)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 222)); assert_eq!(Balances::free_balance(&10), 60 + 2); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398); @@ -874,14 +835,8 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 40, pool_id: 1, payout: 188 }] ); - - // Expect a payout of 188: (18,800 del virtual points / 39,800 pool points) * 399 - // pool balance - assert_eq!(PoolMembers::::get(40).unwrap(), del(40, 620)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew(210, 39_800 - 40 * 470, 620) - ); + assert_eq!(PoolMembers::::get(40).unwrap(), del_float(40, 6.2)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 410)); 
assert_eq!(Balances::free_balance(&40), 60 + 188); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210); @@ -893,88 +848,20 @@ mod claim_payout { pool_events_since_last_call(), vec![Event::PaidOut { member: 50, pool_id: 1, payout: 210 }] ); - - // Expect payout of 210: (21,000 / 21,000) * 210 - assert_eq!(PoolMembers::::get(50).unwrap(), del(50, 620)); - assert_eq!( - RewardPools::::get(&1).unwrap(), - rew(0, 21_000 - 50 * 420, 620) - ); + assert_eq!(PoolMembers::::get(50).unwrap(), del_float(50, 6.2)); + assert_eq!(RewardPools::::get(&1).unwrap(), rew(0, 0, 620)); assert_eq!(Balances::free_balance(&50), 100 + 210); assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0); }); } - #[test] - fn do_reward_payout_correctly_sets_pool_state_to_destroying() { - ExtBuilder::default().build_and_execute(|| { - let _ = with_transaction(|| -> TransactionOutcome { - let mut bonded_pool = BondedPool::::get(1).unwrap(); - let mut reward_pool = RewardPools::::get(1).unwrap(); - let mut member = PoolMembers::::get(10).unwrap(); - - // -- reward_pool.total_earnings saturates - - // Given - Balances::make_free_balance_be(&default_reward_account(), Balance::MAX); - - // When - assert_ok!(Pools::do_reward_payout( - &10, - &mut member, - &mut bonded_pool, - &mut reward_pool - )); - - // Then - assert!(bonded_pool.is_destroying()); - - storage::TransactionOutcome::Rollback(Ok(())) - }); - - // -- current_points saturates (reward_pool.points + new_earnings * bonded_pool.points) - let _ = with_transaction(|| -> TransactionOutcome { - // Given - let mut bonded_pool = BondedPool::::get(1).unwrap(); - let mut reward_pool = RewardPools::::get(1).unwrap(); - let mut member = PoolMembers::::get(10).unwrap(); - // Force new_earnings * bonded_pool.points == 100 - Balances::make_free_balance_be(&default_reward_account(), 5 + 10); - assert_eq!(bonded_pool.points, 10); - // Force reward_pool.points == U256::MAX - new_earnings * bonded_pool.points - reward_pool.points = U256::MAX - U256::from(100u32); - RewardPools::::insert(1, reward_pool.clone()); - - // When - assert_ok!(Pools::do_reward_payout( - &10, - &mut member, - &mut bonded_pool, - &mut reward_pool - )); - - // Then - assert!(bonded_pool.is_destroying()); - - storage::TransactionOutcome::Rollback(Ok(())) - }); - }); - } - #[test] fn reward_payout_errors_if_a_member_is_fully_unbonding() { ExtBuilder::default().add_members(vec![(11, 11)]).build_and_execute(|| { // fully unbond the member. 
- assert_ok!(Pools::fully_unbond(Origin::signed(11), 11)); + assert_ok!(fully_unbond_permissioned(11)); - let mut bonded_pool = BondedPool::::get(1).unwrap(); - let mut reward_pool = RewardPools::::get(1).unwrap(); - let mut member = PoolMembers::::get(11).unwrap(); - - assert_noop!( - Pools::do_reward_payout(&11, &mut member, &mut bonded_pool, &mut reward_pool,), - Error::::FullyUnbonding - ); + assert_noop!(Pools::claim_payout(Origin::signed(11)), Error::::FullyUnbonding); assert_eq!( pool_events_since_last_call(), @@ -989,80 +876,87 @@ mod claim_payout { } #[test] - fn calculate_member_payout_works_with_a_pool_of_1() { - let del = |reward_pool_total_earnings| del(10, reward_pool_total_earnings); + fn do_reward_payout_works_with_a_pool_of_1() { + let del = |last_recorded_reward_counter| del_float(10, last_recorded_reward_counter); ExtBuilder::default().build_and_execute(|| { - let mut bonded_pool = BondedPool::::get(1).unwrap(); - let mut reward_pool = RewardPools::::get(1).unwrap(); - let mut member = PoolMembers::::get(10).unwrap(); + let (mut member, mut bonded_pool, mut reward_pool) = + Pools::get_member_with_pools(&10).unwrap(); let ed = Balances::minimum_balance(); - // Given no rewards have been earned - // When let payout = - Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then assert_eq!(payout, 0); - assert_eq!(member, del(0)); + assert_eq!(member, del(0.0)); assert_eq!(reward_pool, rew(0, 0, 0)); // Given the pool has earned some rewards for the first time - Balances::make_free_balance_be(&default_reward_account(), ed + 5); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 5)); // When let payout = - Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 5); // (10 * 5 del virtual points / 10 * 5 pool points) * 5 pool balance + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 5 } + ] + ); + assert_eq!(payout, 5); assert_eq!(reward_pool, rew(0, 0, 5)); - assert_eq!(member, del(5)); + assert_eq!(member, del(0.5)); // Given the pool has earned rewards again - Balances::make_free_balance_be(&default_reward_account(), ed + 10); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 10)); // When let payout = - Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 10); // (10 * 10 del virtual points / 10 pool points) * 5 pool balance + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }] + ); + assert_eq!(payout, 10); assert_eq!(reward_pool, rew(0, 0, 15)); - assert_eq!(member, del(15)); + assert_eq!(member, del(1.5)); // Given the pool has earned no new rewards Balances::make_free_balance_be(&default_reward_account(), ed + 0); // When let payout = - Pools::calculate_member_payout(&mut member, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut member, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then + assert_eq!(pool_events_since_last_call(), vec![]); assert_eq!(payout, 
0); assert_eq!(reward_pool, rew(0, 0, 15)); - assert_eq!(member, del(15)); + assert_eq!(member, del(1.5)); }); } #[test] - fn calculate_member_payout_works_with_a_pool_of_3() { + fn do_reward_payout_works_with_a_pool_of_3() { ExtBuilder::default() .add_members(vec![(40, 40), (50, 50)]) .build_and_execute(|| { let mut bonded_pool = BondedPool::::get(1).unwrap(); let mut reward_pool = RewardPools::::get(1).unwrap(); - let ed = Balances::minimum_balance(); - // PoolMember with 10 points + let mut del_10 = PoolMembers::::get(10).unwrap(); - // PoolMember with 40 points let mut del_40 = PoolMembers::::get(40).unwrap(); - // PoolMember with 50 points let mut del_50 = PoolMembers::::get(50).unwrap(); assert_eq!( @@ -1078,479 +972,970 @@ mod claim_payout { // Given we have a total of 100 points split among the members assert_eq!(del_50.points + del_40.points + del_10.points, 100); assert_eq!(bonded_pool.points, 100); + // and the reward pool has earned 100 in rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 100); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 100)); // When let payout = - Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 10); // (10 del virtual points / 100 pool points) * 100 pool balance - assert_eq!(del_10, del(10, 100)); - assert_eq!(reward_pool, rew(90, 100 * 100 - 100 * 10, 100)); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 10)); + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }] + ); + assert_eq!(payout, 10); + assert_eq!(del_10, del(10, 1)); + assert_eq!(reward_pool, rew(0, 0, 10)); // When let payout = - Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 40); // (400 del virtual points / 900 pool points) * 90 pool balance - assert_eq!(del_40, del(40, 100)); assert_eq!( - reward_pool, - rew( - 50, - // old pool points - member virtual points - 9_000 - 100 * 40, - 100 - ) + pool_events_since_last_call(), + vec![Event::PaidOut { member: 40, pool_id: 1, payout: 40 }] ); - // Mock the reward pool transferring the payout to del_40 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 40)); + assert_eq!(payout, 40); + assert_eq!(del_40, del(40, 1)); + assert_eq!(reward_pool, rew(0, 0, 50)); // When let payout = - Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 50); // (50 del virtual points / 50 pool points) * 50 pool balance - assert_eq!(del_50, del(50, 100)); + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }] + ); + assert_eq!(payout, 50); + assert_eq!(del_50, del(50, 1)); assert_eq!(reward_pool, rew(0, 0, 100)); - // Mock the reward pool transferring the payout to del_50 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 50)); // Given the reward pool has some new rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 50); + assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 
50)); // When let payout = - Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 5); // (500 del virtual points / 5,000 pool points) * 50 pool balance - assert_eq!(del_10, del(10, 150)); - assert_eq!(reward_pool, rew(45, 5_000 - 50 * 10, 150)); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 5)); + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }] + ); + assert_eq!(payout, 5); + assert_eq!(del_10, del_float(10, 1.5)); + assert_eq!(reward_pool, rew(0, 0, 105)); // When let payout = - Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 20); // (2,000 del virtual points / 4,500 pool points) * 45 pool balance - assert_eq!(del_40, del(40, 150)); - assert_eq!(reward_pool, rew(25, 4_500 - 50 * 40, 150)); - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 20)); + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 40, pool_id: 1, payout: 20 }] + ); + assert_eq!(payout, 20); + assert_eq!(del_40, del_float(40, 1.5)); + assert_eq!(reward_pool, rew(0, 0, 125)); // Given del_50 hasn't claimed and the reward pools has just earned 50 assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75); // When let payout = - Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 50); // (5,000 del virtual points / 7,5000 pool points) * 75 pool balance - assert_eq!(del_50, del(50, 200)); - assert_eq!( - reward_pool, - rew( - 25, - // old pool points + points from new earnings - del points. 
- // - // points from new earnings = new earnings(50) * bonded_pool.points(100) - // del points = member.points(50) * new_earnings_since_last_claim (100) - (2_500 + 50 * 100) - 50 * 100, - 200, - ) - ); - // Mock the reward pool transferring the payout to del_50 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 50)); + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }] + ); + assert_eq!(payout, 50); + assert_eq!(del_50, del_float(50, 2.0)); + assert_eq!(reward_pool, rew(0, 0, 175)); // When let payout = - Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }] + ); assert_eq!(payout, 5); - assert_eq!(del_10, del(10, 200)); - assert_eq!(reward_pool, rew(20, 2_500 - 10 * 50, 200)); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 5)); + assert_eq!(del_10, del_float(10, 2.0)); + assert_eq!(reward_pool, rew(0, 0, 180)); // Given del_40 hasn't claimed and the reward pool has just earned 400 assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400)); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420); // When let payout = - Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 40 }] + ); assert_eq!(payout, 40); - assert_eq!(del_10, del(10, 600)); - assert_eq!( - reward_pool, - rew( - 380, - // old pool points + points from new earnings - del points - // - // points from new earnings = new earnings(400) * bonded_pool.points(100) - // del points = member.points(10) * new_earnings_since_last_claim(400) - (2_000 + 400 * 100) - 10 * 400, - 600 - ) - ); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 40)); + assert_eq!(del_10, del_float(10, 6.0)); + assert_eq!(reward_pool, rew(0, 0, 220)); // Given del_40 + del_50 haven't claimed and the reward pool has earned 20 assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20)); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400); // When let payout = - Pools::calculate_member_payout(&mut del_10, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&10, &mut del_10, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 2); // (200 del virtual points / 38,000 pool points) * 400 pool balance - assert_eq!(del_10, del(10, 620)); - assert_eq!(reward_pool, rew(398, (38_000 + 20 * 100) - 10 * 20, 620)); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 2)); + assert_eq!(payout, 2); + assert_eq!(del_10, del_float(10, 6.2)); + assert_eq!(reward_pool, rew(0, 0, 222)); // When let payout = - Pools::calculate_member_payout(&mut del_40, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&40, &mut del_40, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 188); // (18,800 del virtual points / 39,800 pool points) * 399 
pool balance - assert_eq!(del_40, del(40, 620)); - assert_eq!(reward_pool, rew(210, 39_800 - 40 * 470, 620)); - // Mock the reward pool transferring the payout to del_10 - assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -= 188)); + assert_eq!(payout, 188); // 20 (from the 50) + 160 (from the 400) + 8 (from the 20) + assert_eq!(del_40, del_float(40, 6.2)); + assert_eq!(reward_pool, rew(0, 0, 410)); // When let payout = - Pools::calculate_member_payout(&mut del_50, &mut bonded_pool, &mut reward_pool) + Pools::do_reward_payout(&50, &mut del_50, &mut bonded_pool, &mut reward_pool) .unwrap(); // Then - assert_eq!(payout, 210); // (21,000 / 21,000) * 210 - assert_eq!(del_50, del(50, 620)); - assert_eq!(reward_pool, rew(0, 21_000 - 50 * 420, 620)); + assert_eq!(payout, 210); // 200 (from the 400) + 10 (from the 20) + assert_eq!(del_50, del_float(50, 6.2)); + assert_eq!(reward_pool, rew(0, 0, 620)); }); } #[test] - fn do_reward_payout_works() { - ExtBuilder::default() - .add_members(vec![(40, 40), (50, 50)]) - .build_and_execute(|| { - let mut bonded_pool = BondedPool::::get(1).unwrap(); - let mut reward_pool = RewardPools::::get(1).unwrap(); - let ed = Balances::minimum_balance(); + fn rewards_distribution_is_fair_basic() { + ExtBuilder::default().build_and_execute(|| { + // reward pool by 10. + Balances::mutate_account(&default_reward_account(), |f| f.free += 10).unwrap(); - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Created { depositor: 10, pool_id: 1 }, - Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, - Event::Bonded { member: 50, pool_id: 1, bonded: 50, joined: true } - ] - ); + // 20 joins afterwards. + Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); - // Given the bonded pool has 100 points - assert_eq!(bonded_pool.points, 100); - // Each member currently has a free balance of - Balances::make_free_balance_be(&10, 0); - Balances::make_free_balance_be(&40, 0); - Balances::make_free_balance_be(&50, 0); - // and the reward pool has earned 100 in rewards - Balances::make_free_balance_be(&default_reward_account(), ed + 100); + // reward by another 20 + Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); - let mut del_10 = PoolMembers::get(10).unwrap(); - let mut del_40 = PoolMembers::get(40).unwrap(); - let mut del_50 = PoolMembers::get(50).unwrap(); + // 10 should claim 10 + 10, 20 should claim 20 / 2. + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 20 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 10 }, + ] + ); - // When - assert_ok!(Pools::do_reward_payout( - &10, - &mut del_10, - &mut bonded_pool, - &mut reward_pool - )); + // any upcoming rewards are shared equally. 
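+ // A sketch of the underlying accounting, assuming the reward counter tracks the
+ // cumulative payout-per-point: the first 10 arrived while only 10's 10 points existed
+ // (counter 0.0 -> 1.0), and the next 20 arrived over 20 points (counter 1.0 -> 2.0).
+ // 10 therefore claimed 10 * (2.0 - 0.0) = 20 and 20 claimed 10 * (2.0 - 1.0) = 10,
+ // as asserted above. With equal points from here on, the counter moves identically
+ // for both members.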
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap();
+
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 10 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 10 },
+ ]
+ );
+ });
+ }
+
+ #[test]
+ fn rewards_distribution_is_fair_basic_with_fractions() {
+ // basically checks the case where the amount of rewards is less than the pool shares. For
+ // this, we have to rely on fixed-point arithmetic.
+ ExtBuilder::default().build_and_execute(|| {
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 3).unwrap();
+
+ Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10);
+ assert_ok!(Pools::join(Origin::signed(20), 10, 1));
+
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 6).unwrap();
+
+ // 10 should claim 3 + 3, 20 should claim 3.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 3 + 3 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 3 },
+ ]
+ );
+
+ // any upcoming rewards are shared equally.
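+ // (the next 8 arrives over 20 points, moving the counter by 8 / 20 = 0.4; each member
+ // holds 10 points, so each claims 4.)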
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 8).unwrap();
+
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 4 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 4 },
+ ]
+ );
+
+ // uneven upcoming rewards are shared equally, rounded down.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 7).unwrap();
+
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 3 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 3 },
+ ]
+ );
+ });
+ }
+
+ #[test]
+ fn rewards_distribution_is_fair_3() {
+ ExtBuilder::default().build_and_execute(|| {
+ let ed = Balances::minimum_balance();
+
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+ Balances::make_free_balance_be(&20, ed + 10);
+ assert_ok!(Pools::join(Origin::signed(20), 10, 1));
+
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap();
+
+ Balances::make_free_balance_be(&30, ed + 10);
+ assert_ok!(Pools::join(Origin::signed(30), 10, 1));
+
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+ // 10 should claim 30 + 100 / 2 + 60 / 3, 20 should claim 100 / 2 + 60 / 3, and 30
+ // should claim 60 / 3.
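+ // (equivalently: the counter moves 0 -> 3 -> 8 -> 10 as the three rewards land, and
+ // each member claims points * (10 - its last recorded counter).)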
+ assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 30 + 100 / 2 + 60 / 3 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 100 / 2 + 60 / 3 }, + Event::PaidOut { member: 30, pool_id: 1, payout: 60 / 3 }, + ] + ); + + // any upcoming rewards are shared equally. + Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 10 }, + Event::PaidOut { member: 30, pool_id: 1, payout: 10 }, + ] + ); + }); + } + + #[test] + fn rewards_distribution_is_fair_bond_extra() { + ExtBuilder::default().build_and_execute(|| { + let ed = Balances::minimum_balance(); + + Balances::make_free_balance_be(&20, ed + 20); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + Balances::make_free_balance_be(&30, ed + 20); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + + Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); + + // everyone claims. + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 20 }, + Event::PaidOut { member: 30, pool_id: 1, payout: 10 } + ] + ); + + // 30 now bumps itself to be like 20. + assert_ok!(Pools::bond_extra(Origin::signed(30), BondExtra::FreeBalance(10))); + + // more rewards come in. + Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: false }, + Event::PaidOut { member: 10, pool_id: 1, payout: 20 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 40 }, + Event::PaidOut { member: 30, pool_id: 1, payout: 40 } + ] + ); + }); + } + + #[test] + fn rewards_distribution_is_fair_unbond() { + ExtBuilder::default().build_and_execute(|| { + let ed = Balances::minimum_balance(); + + Balances::make_free_balance_be(&20, ed + 20); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + + Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + + // everyone claims. 
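+ // (the 40 arrives over 10 + 20 + 10 = 40 points, so the payouts mirror the bonds:
+ // 10, 20 and 10.)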
+ assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 20 } + ] + ); + + // 20 unbonds to be equal to 10 (10 points each). + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + + // more rewards come in. + Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 }, + Event::PaidOut { member: 10, pool_id: 1, payout: 50 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 50 }, + ] + ); + }); + } + + #[test] + fn unclaimed_reward_is_safe() { + ExtBuilder::default().build_and_execute(|| { + let ed = Balances::minimum_balance(); + + Balances::make_free_balance_be(&20, ed + 20); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + Balances::make_free_balance_be(&30, ed + 20); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + + // 10 gets 10, 20 gets 20, 30 gets 10 + Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); + + // some claim. + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 20 } + ] + ); + + // 10 gets 20, 20 gets 40, 30 gets 20 + Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); + + // some claim. + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id: 1, payout: 20 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 40 } + ] + ); + + // 10 gets 20, 20 gets 40, 30 gets 20 + Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); + + // some claim. 
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 20 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 40 }
+ ]
+ );
+
+ // now 30 claims all at once
+ assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 30, pool_id: 1, payout: 10 + 20 + 20 }]
+ );
+ });
+ }
+
+ #[test]
+ fn bond_extra_and_delayed_claim() {
+ ExtBuilder::default().build_and_execute(|| {
+ let ed = Balances::minimum_balance();
+
+ Balances::make_free_balance_be(&20, ed + 200);
+ assert_ok!(Pools::join(Origin::signed(20), 20, 1));
+
+ // 10 gets 10, 20 gets 20
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+ // some claim.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 10 }
+ ]
+ );
+
+ // 20 has not claimed yet, more reward comes
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+ // and 20 bonds more -- they should not have more share of this reward.
+ assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10)));
+
+ // everyone claims.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // 20 + 40, which means the extra amount they bonded did not affect this payout.
+ Event::PaidOut { member: 20, pool_id: 1, payout: 60 },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: false },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 20 }
+ ]
+ );
+
+ // but in the next round of rewards, the extra 10 they bonded has an impact.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+ // everyone claims.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 15 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 45 }
+ ]
+ );
+ });
+ }
+
+ #[test]
+ fn create_sets_recorded_data() {
+ ExtBuilder::default().build_and_execute(|| {
+ MaxPools::<Runtime>::set(None);
+ // the pool of depositor 10 (pool id 1) has already been created.
+ let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+ assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+ assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+
+ // transfer some reward to pool 1.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+ // create pool 2
+ Balances::make_free_balance_be(&20, 100);
+ assert_ok!(Pools::create(Origin::signed(20), 10, 20, 20, 20));
+
+ // has no impact -- pool 2 starts with a clean initial state.
+ let (member_20, _, reward_pool_20) = Pools::get_member_with_pools(&20).unwrap();
+
+ assert_eq!(reward_pool_20.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool_20.total_rewards_claimed, 0);
+ assert_eq!(reward_pool_20.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member_20.last_recorded_reward_counter, 0.into());
+
+ // pre-fund the reward account of pool id 3 with some funds.
+ Balances::make_free_balance_be(&Pools::create_reward_account(3), 10);
+
+ // create pool 3
+ Balances::make_free_balance_be(&30, 100);
+ assert_ok!(Pools::create(Origin::signed(30), 10, 30, 30, 30));
+
+ // reward counter is still the same.
+ let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap();
+ assert_eq!(
+ Balances::free_balance(&Pools::create_reward_account(3)),
+ 10 + Balances::minimum_balance()
+ );
+
+ assert_eq!(reward_pool_30.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool_30.total_rewards_claimed, 0);
+ assert_eq!(reward_pool_30.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member_30.last_recorded_reward_counter, 0.into());
+
+ // and 30 can claim the reward now.
+ assert_ok!(Pools::claim_payout(Origin::signed(30)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Created { depositor: 20, pool_id: 2 },
+ Event::Bonded { member: 20, pool_id: 2, bonded: 10, joined: true },
+ Event::Created { depositor: 30, pool_id: 3 },
+ Event::Bonded { member: 30, pool_id: 3, bonded: 10, joined: true },
+ Event::PaidOut { member: 30, pool_id: 3, payout: 10 }
+ ]
+ );
+ })
+ }
+
+ #[test]
+ fn join_updates_recorded_data() {
+ ExtBuilder::default().build_and_execute(|| {
+ MaxPoolMembers::<Runtime>::set(None);
+ MaxPoolMembersPerPool::<Runtime>::set(None);
+ let join = |x, y| {
+ Balances::make_free_balance_be(&x, y + Balances::minimum_balance());
+ assert_ok!(Pools::join(Origin::signed(x), y, 1));
+ };
+
+ {
+ let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+ assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+ assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+ }
+
+ // someone joins without any rewards being issued.
+ {
+ join(20, 10);
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+ // reward counter is 0 both before and after the join.
+ assert_eq!(member.last_recorded_reward_counter, 0.into());
+ assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+ }
+
+ // transfer some reward to pool 1.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap();
+
+ {
+ join(30, 10);
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+ assert_eq!(reward_pool.last_recorded_total_payouts, 60);
+ // explanation: we have a total of 20 points so far (excluding the 10 that just got
+ // bonded), and 60 unclaimed rewards. each share is then worth 3 units of rewards,
+ // thus reward counter is 3.
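// The same figure falls out of the pallet's `RewardPool::current_reward_counter`
// calculation, restated here as a sketch for clarity (a restatement, not the
// pallet's exact code): new_counter = last_recorded_reward_counter +
// pending_rewards / bonded_points = 0 + 60 / 20 = 3. With the fixed-point
// type the mock wires in as `RewardCounter`:
use sp_runtime::{FixedPointNumber, FixedU128};
assert_eq!(
    FixedU128::saturating_from_rational(60u128, 20u128),
    FixedU128::saturating_from_integer(3u128),
);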
+ // member's reward counter is stamped with the same value.
+ assert_eq!(member.last_recorded_reward_counter, 3.into());
+ assert_eq!(reward_pool.last_recorded_reward_counter, 3.into());
+ }
+
+ // someone else joins
+ {
+ join(40, 10);
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&40).unwrap();
+ // reward counter does not change since no rewards have come in.
+ assert_eq!(member.last_recorded_reward_counter, 3.into());
+ assert_eq!(reward_pool.last_recorded_reward_counter, 3.into());
+ assert_eq!(reward_pool.last_recorded_total_payouts, 60);
+ }
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 30, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 40, pool_id: 1, bonded: 10, joined: true }
+ ]
+ );
+ })
+ }
+
+ #[test]
+ fn bond_extra_updates_recorded_data() {
+ ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| {
+ MaxPoolMembers::<Runtime>::set(None);
+ MaxPoolMembersPerPool::<Runtime>::set(None);
+
+ // initial state of pool 1.
+ {
+ let (member_10, _, reward_pool_10) = Pools::get_member_with_pools(&10).unwrap();
+
+ assert_eq!(reward_pool_10.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool_10.total_rewards_claimed, 0);
+ assert_eq!(reward_pool_10.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member_10.last_recorded_reward_counter, 0.into());
+ }
+
+ Balances::make_free_balance_be(&10, 100);
+ Balances::make_free_balance_be(&20, 100);
+
+ // 10 bonds extra without any rewards.
+ {
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+ assert_eq!(member.last_recorded_reward_counter, 0.into());
+ assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+ }
+
+ // 10 bonds extra again with some rewards. This reward should be split equally between
+ // 10 and 20, as they both have equal points now.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
+
+ {
+ assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10)));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+ // explanation: before bond_extra takes place, there are 40 points and 30 balance in
+ // the system, RewardCounter is therefore 0.75.
+ assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75));
assert_eq!(
- pool_events_since_last_call(),
- vec![Event::PaidOut { member: 50, pool_id: 1, payout: 50 }]
+ reward_pool.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
);
+ assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+ }
- // We expect a payout of 50: (5,000 del virtual points / 7,5000 pool points) * 75
- // pool balance
- assert_eq!(del_50, del(50, 200));
+ // 20 bonds extra as well, without further rewards.
+ {
+ assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10)));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+ assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75));
assert_eq!(
- reward_pool,
- rew(
- 25,
- // old pool points + points from new earnings - del points.
- //
- // points from new earnings = new earnings(50) * bonded_pool.points(100)
- // del points = member.points(50) * new_earnings_since_last_claim (100)
- (2_500 + 50 * 100) - 50 * 100,
- 200,
- )
+ reward_pool.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
);
- assert_eq!(Balances::free_balance(&50), 50 + 50);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25);
+ assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+ }
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 15 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 15 },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 10, joined: false }
+ ]
+ );
+ })
+ }
+
+ #[test]
+ fn unbond_updates_recorded_data() {
+ ExtBuilder::default()
+ .add_members(vec![(20, 20), (30, 20)])
+ .build_and_execute(|| {
+ MaxPoolMembers::<Runtime>::set(None);
+ MaxPoolMembersPerPool::<Runtime>::set(None);
+
+ // initial state of pool 1.
+ {
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap();
+
+ assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool.total_rewards_claimed, 0);
+ assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+
+ assert_eq!(member.last_recorded_reward_counter, 0.into());
+ }
+
+ // 20 unbonds without any rewards.
+ {
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, 10));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+ assert_eq!(member.last_recorded_reward_counter, 0.into());
+ assert_eq!(reward_pool.last_recorded_total_payouts, 0);
+ assert_eq!(reward_pool.last_recorded_reward_counter, 0.into());
+ }
- // When
- assert_ok!(Pools::do_reward_payout(
- &10,
- &mut del_10,
- &mut bonded_pool,
- &mut reward_pool
- ));
+ // some rewards come in.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap();
- // Then
- assert_eq!(
- pool_events_since_last_call(),
- vec![Event::PaidOut { member: 10, pool_id: 1, payout: 5 }]
- );
+ // and 30 also unbonds half.
+ {
+ assert_ok!(Pools::unbond(Origin::signed(30), 30, 10));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+ // 30 reward in the system, and 40 points entitled to collect it before this unbond;
+ // RewardCounter is therefore 3/4.
+ assert_eq!(
+ member.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+ assert_eq!(
+ reward_pool.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ }
- // We expect a payout of 5
- assert_eq!(del_10, del(10, 200));
- assert_eq!(reward_pool, rew(20, 2_500 - 10 * 50, 200));
- assert_eq!(Balances::free_balance(&10), 15 + 5);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 20);
+ // 30 unbonds again, no change this time.
+ {
+ assert_ok!(Pools::unbond(Origin::signed(30), 30, 5));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap();
+ assert_eq!(
+ member.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+ assert_eq!(
+ reward_pool.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ }
- // Given del 40 hasn't claimed and the reward pool has just earned 400
- assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 400));
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420);
+ // 20 unbonds again, no change this time, just collecting their reward.
+ {
+ assert_ok!(Pools::unbond(Origin::signed(20), 20, 5));
+ let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap();
+ assert_eq!(
+ member.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ assert_eq!(reward_pool.last_recorded_total_payouts, 30);
+ assert_eq!(
+ reward_pool.last_recorded_reward_counter,
+ RewardCounter::from_float(0.75)
+ );
+ }
- // When
- assert_ok!(Pools::do_reward_payout(
- &10,
- &mut del_10,
- &mut bonded_pool,
- &mut reward_pool
- ));
+ // trigger 10's reward as well to see all of the payouts.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
- // Then
assert_eq!(
pool_events_since_last_call(),
- vec![Event::PaidOut { member: 10, pool_id: 1, payout: 40 }]
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+ Event::Bonded { member: 30, pool_id: 1, bonded: 20, joined: true },
+ Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 },
+ Event::PaidOut { member: 30, pool_id: 1, payout: 15 },
+ Event::Unbonded { member: 30, pool_id: 1, balance: 10, points: 10 },
+ Event::Unbonded { member: 30, pool_id: 1, balance: 5, points: 5 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 7 },
+ Event::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5 },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 7 }
+ ]
);
+ })
+ }
- // We expect a payout of 40
- assert_eq!(del_10, del(10, 600));
- assert_eq!(
- reward_pool,
- rew(
- 380,
- // old pool points + points from new earnings - del points
- //
- // points from new earnings = new earnings(400) * bonded_pool.points(100)
- // del points = member.points(10) * new_earnings_since_last_claim(400)
- (2_000 + 400 * 100) - 10 * 400,
- 600
- )
- );
- assert_eq!(Balances::free_balance(&10), 20 + 40);
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 380);
+ #[test]
+ fn rewards_are_rounded_down_depositor_collects_them() {
+ ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| {
+ // initial balance of 10.
+ assert_eq!(Balances::free_balance(&10), 5);
+ assert_eq!(
+ Balances::free_balance(&default_reward_account()),
+ Balances::minimum_balance()
+ );
- // Given del 40 + del 50 haven't claimed and the reward pool has earned 20
- assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 20));
- assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400);
+ // some rewards come in.
+ Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); - // When - assert_ok!(Pools::do_reward_payout( - &10, - &mut del_10, - &mut bonded_pool, - &mut reward_pool - )); + // everyone claims + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); - // Then - assert_eq!( - pool_events_since_last_call(), - vec![Event::PaidOut { member: 10, pool_id: 1, payout: 2 }] - ); + // some dust (1) remains in the reward account. + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 13 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 26 } + ] + ); - // Expect a payout of 2: (200 del virtual points / 38,000 pool points) * 400 pool - // balance - assert_eq!(del_10, del(10, 620)); - assert_eq!(reward_pool, rew(398, (38_000 + 20 * 100) - 10 * 20, 620)); - assert_eq!(Balances::free_balance(&10), 60 + 2); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398); + // start dismantling the pool. + assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying)); + assert_ok!(fully_unbond_permissioned(20)); - // When - assert_ok!(Pools::do_reward_payout( - &40, - &mut del_40, - &mut bonded_pool, - &mut reward_pool - )); + CurrentEra::set(3); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + assert_ok!(fully_unbond_permissioned(10)); - // Then - assert_eq!( - pool_events_since_last_call(), - vec![Event::PaidOut { member: 40, pool_id: 1, payout: 188 }] - ); + CurrentEra::set(6); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); - // Expect a payout of 188: (18,800 del virtual points / 39,800 pool points) * 399 - // pool balance - assert_eq!(del_40, del(40, 620)); - assert_eq!(reward_pool, rew(210, 39_800 - 40 * 470, 620)); - assert_eq!(Balances::free_balance(&40), 60 + 188); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, + Event::Unbonded { member: 20, pool_id: 1, balance: 20, points: 20 }, + Event::Withdrawn { member: 20, pool_id: 1, balance: 20, points: 20 }, + Event::MemberRemoved { pool_id: 1, member: 20 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, + Event::Withdrawn { member: 10, pool_id: 1, balance: 10, points: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::Destroyed { pool_id: 1 } + ] + ); - // When - assert_ok!(Pools::do_reward_payout( - &50, - &mut del_50, - &mut bonded_pool, - &mut reward_pool - )); + // original ed + ed put into reward account + reward + bond + dust. + assert_eq!(Balances::free_balance(&10), 5 + 5 + 13 + 10 + 1); + }) + } + + #[test] + fn claim_payout_large_numbers() { + let unit = 10u128.pow(12); // akin to KSM + ExistentialDeposit::set(unit); + StakingMinBond::set(unit * 1000); + + ExtBuilder::default() + .max_members(Some(4)) + .max_members_per_pool(Some(4)) + .add_members(vec![(20, 1500 * unit), (21, 2500 * unit), (22, 5000 * unit)]) + .build_and_execute(|| { + // some rewards come in. 
+ assert_eq!(Balances::free_balance(&default_reward_account()), unit); + Balances::mutate_account(&default_reward_account(), |f| f.free += unit / 1000) + .unwrap(); + + // everyone claims + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(21))); + assert_ok!(Pools::claim_payout(Origin::signed(22))); - // Then assert_eq!( pool_events_since_last_call(), - vec![Event::PaidOut { member: 50, pool_id: 1, payout: 210 }] + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { + member: 10, + pool_id: 1, + bonded: 1000000000000000, + joined: true + }, + Event::Bonded { + member: 20, + pool_id: 1, + bonded: 1500000000000000, + joined: true + }, + Event::Bonded { + member: 21, + pool_id: 1, + bonded: 2500000000000000, + joined: true + }, + Event::Bonded { + member: 22, + pool_id: 1, + bonded: 5000000000000000, + joined: true + }, + Event::PaidOut { member: 10, pool_id: 1, payout: 100000000 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 150000000 }, + Event::PaidOut { member: 21, pool_id: 1, payout: 250000000 }, + Event::PaidOut { member: 22, pool_id: 1, payout: 500000000 } + ] ); - - // Expect payout of 210: (21,000 / 21,000) * 210 - assert_eq!(del_50, del(50, 620)); - assert_eq!(reward_pool, rew(0, 21_000 - 50 * 420, 620)); - assert_eq!(Balances::free_balance(&50), 100 + 210); - assert_eq!(Balances::free_balance(&default_reward_account()), ed + 0); - }); + }) } } @@ -2461,7 +2846,8 @@ mod withdraw_unbonded { assert_eq!( SubPoolsStorage::::get(&1).unwrap().with_era, - unbonding_pools_with_era! { 3 => UnbondPool { points: 550 / 2 + 40 / 2, balance: 550 / 2 + 40 / 2 }} + unbonding_pools_with_era! { 3 => UnbondPool { points: 550 / 2 + 40 / 2, balance: 550 / 2 + 40 / 2 + }} ); assert_eq!( @@ -3089,11 +3475,7 @@ mod create { ); assert_eq!( RewardPools::::get(2).unwrap(), - RewardPool { - balance: Zero::zero(), - points: U256::zero(), - total_earnings: Zero::zero(), - } + RewardPool { ..Default::default() } ); assert_eq!( @@ -3688,3 +4070,310 @@ mod update_roles { }) } } + +mod reward_counter_precision { + use sp_runtime::FixedU128; + + use super::*; + + const DOT: Balance = 10u128.pow(10u32); + const POLKADOT_TOTAL_ISSUANCE_GENESIS: Balance = DOT * 10u128.pow(9u32); + + const fn inflation(years: u128) -> u128 { + let mut i = 0; + let mut start = POLKADOT_TOTAL_ISSUANCE_GENESIS; + while i < years { + start = start + start / 10; + i += 1 + } + start + } + + fn default_pool_reward_counter() -> FixedU128 { + RewardPools::::get(1) + .unwrap() + .current_reward_counter(1, BondedPools::::get(1).unwrap().points) + .unwrap() + } + + fn pending_rewards(of: AccountId) -> Option> { + let member = PoolMembers::::get(of).unwrap(); + assert_eq!(member.pool_id, 1); + let rc = default_pool_reward_counter(); + member.pending_rewards(rc).ok() + } + + #[test] + fn smallest_claimable_reward() { + // create a pool that has all of the polkadot issuance in 50 years. + let pool_bond = inflation(50); + ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| { + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { + member: 10, + pool_id: 1, + bonded: 1173908528796953165005, + joined: true, + } + ] + ); + + // the smallest reward that this pool can handle is + let expected_smallest_reward = inflation(50) / 10u128.pow(18); + + // tad bit less. cannot be paid out. 
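// Why this is the cutoff (a sketch of the arithmetic, not the pallet's code):
// each reward bumps the counter by `reward / points`, and `FixedU128` resolves
// only 18 decimal places, so any `reward < points / 10^18` rounds the counter
// increment down to zero and can never be paid out. Here
// `points = inflation(50) ~= 1.17 * 10^21`, giving a cutoff of roughly 1_173
// units -- which matches the `payout: 1173` asserted for the successful claim
// below.
assert_eq!(pool_bond / 10u128.pow(18), expected_smallest_reward);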
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ expected_smallest_reward - 1));
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(pool_events_since_last_call(), vec![]);
+ // revert it.
+
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free -=
+ expected_smallest_reward - 1));
+
+ // tad bit more. can be claimed.
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ expected_smallest_reward + 1));
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 1173 }]
+ );
+ })
+ }
+
+ #[test]
+ fn reward_counter_calc_wont_fail_in_normal_polkadot_future() {
+ // create a pool that has roughly half of the polkadot issuance in 10 years.
+ let pool_bond = inflation(10) / 2;
+ ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true,
+ }
+ ]
+ );
+
+ // in 10 years, the total claimed rewards are large values as well. assuming that a pool
+ // is earning all of the inflation per year (which is really unrealistic, but worst
+ // case), that will be:
+ let pool_total_earnings_10_years = inflation(10) - POLKADOT_TOTAL_ISSUANCE_GENESIS;
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free +=
+ pool_total_earnings_10_years));
+
+ // some whale now joins with the other half of the total issuance. This will bloat all
+ // the calculations regarding the current reward counter.
+ Balances::make_free_balance_be(&20, pool_bond * 2);
+ assert_ok!(Pools::join(Origin::signed(20), pool_bond, 1));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true
+ }]
+ );
+
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 15937424600999999996 }]
+ );
+
+ // now let a small member join with 10 DOTs.
+ Balances::make_free_balance_be(&30, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(30), 10 * DOT, 1));
+
+ // and give a reasonably small reward to the pool.
+ assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += DOT));
+
+ assert_ok!(Pools::claim_payout(Origin::signed(30)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded { member: 30, pool_id: 1, bonded: 100000000000, joined: true },
+ // quite small, but working fine.
+ Event::PaidOut { member: 30, pool_id: 1, payout: 38 }
+ ]
+ );
+ })
+ }
+
+ #[test]
+ fn reward_counter_update_can_fail_if_pool_is_highly_slashed() {
+ // create a pool that has roughly half of the polkadot issuance in 10 years.
+ let pool_bond = inflation(10) / 2;
+ ExtBuilder::default().ed(DOT).min_bond(pool_bond).build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 12_968_712_300_500_000_000,
+ joined: true,
+ }
+ ]
+ );
+
+ // slash this pool by 99% of that.
+ StakingMock::set_bonded_balance(default_bonded_account(), DOT + pool_bond / 100);
+
+ // some whale now joins with the other half of the total issuance.
+ // This will trigger an overflow. This test is actually a bit too lenient because all
+ // the reward counters are set to zero. In other tests where we want to assert that a
+ // scenario won't fail, we should also set the reward counters to some large value.
+ Balances::make_free_balance_be(&20, pool_bond * 2);
+ assert_err!(Pools::join(Origin::signed(20), pool_bond, 1), Error::<Runtime>::OverflowRisk);
+ })
+ }
+
+ #[test]
+ fn if_small_member_waits_long_enough_they_will_earn_rewards() {
+ // create a pool that has a quarter of the current polkadot issuance
+ ExtBuilder::default()
+ .ed(DOT)
+ .min_bond(POLKADOT_TOTAL_ISSUANCE_GENESIS / 4)
+ .build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 2500000000000000000,
+ joined: true,
+ }
+ ]
+ );
+
+ // and have a tiny fish join the pool as well.
+ Balances::make_free_balance_be(&20, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1));
+
+ // earn some small rewards
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+
+ // no point in claiming for 20 (nonetheless, it should be harmless)
+ assert!(pending_rewards(20).unwrap().is_zero());
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 100000000000,
+ joined: true
+ },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 9999997 }
+ ]
+ );
+
+ // earn a little more; still nothing can be claimed for 20, but 10 claims their
+ // share.
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+ assert!(pending_rewards(20).unwrap().is_zero());
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 }]
+ );
+
+ // earn some more rewards, this time 20 can also claim.
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+ assert_eq!(pending_rewards(20).unwrap(), 1);
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 },
+ Event::PaidOut { member: 20, pool_id: 1, payout: 1 }
+ ]
+ );
+ });
+ }
+
+ #[test]
+ fn zero_reward_claim_does_not_update_reward_counter() {
+ // create a pool that has a quarter of the current polkadot issuance
+ ExtBuilder::default()
+ .ed(DOT)
+ .min_bond(POLKADOT_TOTAL_ISSUANCE_GENESIS / 4)
+ .build_and_execute(|| {
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded {
+ member: 10,
+ pool_id: 1,
+ bonded: 2500000000000000000,
+ joined: true,
+ }
+ ]
+ );
+
+ // and have a tiny fish join the pool as well.
+ Balances::make_free_balance_be(&20, 20 * DOT);
+ assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1));
+
+ // earn some small rewards
+ assert_ok!(
+ Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000)
+ );
+
+ // if 20 claims now, their reward counter should stay the same, so that they have a
+ // chance of claiming this if they let it accumulate. Also see
+ // `if_small_member_waits_long_enough_they_will_earn_rewards`.
+ assert_ok!(Pools::claim_payout(Origin::signed(10)));
+ assert_ok!(Pools::claim_payout(Origin::signed(20)));
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Bonded {
+ member: 20,
+ pool_id: 1,
+ bonded: 100000000000,
+ joined: true
+ },
+ Event::PaidOut { member: 10, pool_id: 1, payout: 9999997 }
+ ]
+ );
+
+ let current_reward_counter = default_pool_reward_counter();
+ // has been updated, because they actually claimed something.
+ assert_eq!(
+ PoolMembers::<Runtime>::get(10).unwrap().last_recorded_reward_counter,
+ current_reward_counter
+ );
+ // has not been updated, even though the claim transaction went through okay.
+ assert_eq!(
+ PoolMembers::<Runtime>::get(20).unwrap().last_recorded_reward_counter,
+ Default::default()
+ );
+ });
+ }
+}
diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs
index 8e3facfc5ec26..a9003ffd3fb4c 100644
--- a/frame/nomination-pools/src/weights.rs
+++ b/frame/nomination-pools/src/weights.rs
@@ -18,7 +18,7 @@
//! Autogenerated weights for pallet_nomination_pools
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2022-06-10, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2022-06-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
@@ -70,7 +70,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: NominationPools PoolMembers (r:1 w:1)
// Storage: NominationPools BondedPools (r:1 w:1)
// Storage: Staking Ledger (r:1 w:1)
- // Storage: NominationPools RewardPools (r:1 w:0)
+ // Storage: NominationPools RewardPools (r:1 w:1)
// Storage: System Account (r:2 w:1)
// Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0)
// Storage: NominationPools MaxPoolMembers (r:1 w:0)
@@ -80,22 +80,22 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: BagsList ListNodes (r:3 w:3)
// Storage: BagsList ListBags (r:2 w:2)
fn join() -> Weight {
- (124_508_000 as Weight)
+ (123_947_000 as Weight)
.saturating_add(T::DbWeight::get().reads(17 as Weight))
- .saturating_add(T::DbWeight::get().writes(11 as Weight))
+ .saturating_add(T::DbWeight::get().writes(12 as Weight))
}
// Storage: NominationPools PoolMembers (r:1 w:1)
// Storage: NominationPools BondedPools (r:1 w:1)
// Storage: NominationPools RewardPools (r:1 w:1)
- // Storage: System Account (r:2 w:2)
+ // Storage: System Account (r:3 w:2)
// Storage: Staking Ledger (r:1 w:1)
// Storage: Staking Bonded (r:1 w:0)
// Storage: Balances Locks (r:1 w:1)
// Storage: BagsList ListNodes (r:3 w:3)
// Storage: BagsList ListBags (r:2 w:2)
fn bond_extra_transfer() -> Weight {
- (115_185_000 as Weight)
- .saturating_add(T::DbWeight::get().reads(13 as Weight))
+ (118_236_000 as Weight)
+ .saturating_add(T::DbWeight::get().reads(14 as Weight))
.saturating_add(T::DbWeight::get().writes(12 as Weight))
}
// Storage: NominationPools PoolMembers (r:1 w:1)
@@ -108,7 +108,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: BagsList ListNodes (r:3 w:3)
// Storage: BagsList ListBags (r:2 w:2)
fn bond_extra_reward() -> Weight {
- (132_723_000 as Weight)
+ (132_475_000 as Weight)
.saturating_add(T::DbWeight::get().reads(14 as Weight))
.saturating_add(T::DbWeight::get().writes(13 as Weight))
}
@@ -117,7 +117,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Storage: NominationPools PoolMembers (r:1 w:1)
// Storage: NominationPools BondedPools (r:1 w:1)
// Storage: NominationPools RewardPools (r:1 w:1)
//
Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (52_498_000 as Weight) + (50_299_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -136,7 +136,7 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_645_000 as Weight) + (121_254_000 as Weight) .saturating_add(T::DbWeight::get().reads(18 as Weight)) .saturating_add(T::DbWeight::get().writes(13 as Weight)) } @@ -146,9 +146,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (43_320_000 as Weight) + (41_928_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -162,9 +162,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (83_195_000 as Weight) - // Standard Error: 5_000 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) + (81_611_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(7 as Weight)) } @@ -189,7 +189,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (143_495_000 as Weight) + (139_849_000 as Weight) .saturating_add(T::DbWeight::get().reads(19 as Weight)) .saturating_add(T::DbWeight::get().writes(16 as Weight)) } @@ -216,7 +216,7 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (127_998_000 as Weight) + (126_246_000 as Weight) .saturating_add(T::DbWeight::get().reads(22 as Weight)) .saturating_add(T::DbWeight::get().writes(15 as Weight)) } @@ -234,9 +234,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - (49_929_000 as Weight) - // Standard Error: 16_000 - .saturating_add((2_319_000 as Weight).saturating_mul(n as Weight)) + (48_829_000 as Weight) + // Standard Error: 10_000 + .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(5 as Weight)) @@ -244,7 +244,7 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (27_399_000 as Weight) + (26_761_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -253,7 +253,7 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { - (14_813_000 as Weight) + (14_519_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) @@ -265,12 +265,12 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_115_000 as Weight) + (6_173_000 as Weight) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_546_000 as Weight) + (22_261_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -283,7 +283,7 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (48_243_000 as Weight) + (47_959_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } @@ -295,7 +295,7 @@ impl WeightInfo for () { // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) - // Storage: NominationPools RewardPools (r:1 w:0) + // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) @@ -305,22 +305,22 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - (124_508_000 as Weight) + (123_947_000 as Weight) .saturating_add(RocksDbWeight::get().reads(17 as Weight)) - .saturating_add(RocksDbWeight::get().writes(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:3 w:2) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - (115_185_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(13 as Weight)) + (118_236_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(14 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) @@ -333,7 +333,7 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - (132_723_000 as Weight) + (132_475_000 as Weight) .saturating_add(RocksDbWeight::get().reads(14 as Weight)) .saturating_add(RocksDbWeight::get().writes(13 as Weight)) } @@ -342,7 +342,7 @@ impl WeightInfo for () { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (52_498_000 as Weight) + (50_299_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -361,7 +361,7 @@ impl WeightInfo for () { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_645_000 as Weight) + (121_254_000 as 
Weight) .saturating_add(RocksDbWeight::get().reads(18 as Weight)) .saturating_add(RocksDbWeight::get().writes(13 as Weight)) } @@ -371,9 +371,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (43_320_000 as Weight) + (41_928_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -387,9 +387,9 @@ impl WeightInfo for () { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (83_195_000 as Weight) - // Standard Error: 5_000 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) + (81_611_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(7 as Weight)) } @@ -414,7 +414,7 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (143_495_000 as Weight) + (139_849_000 as Weight) .saturating_add(RocksDbWeight::get().reads(19 as Weight)) .saturating_add(RocksDbWeight::get().writes(16 as Weight)) } @@ -441,7 +441,7 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (127_998_000 as Weight) + (126_246_000 as Weight) .saturating_add(RocksDbWeight::get().reads(22 as Weight)) .saturating_add(RocksDbWeight::get().writes(15 as Weight)) } @@ -459,9 +459,9 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - (49_929_000 as Weight) - // Standard Error: 16_000 - .saturating_add((2_319_000 as Weight).saturating_mul(n as Weight)) + (48_829_000 as Weight) + // Standard Error: 10_000 + .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(12 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) @@ -469,7 +469,7 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (27_399_000 as Weight) + (26_761_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -478,7 +478,7 @@ impl WeightInfo for () { // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { - (14_813_000 as Weight) + (14_519_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) @@ -490,12 +490,12 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_115_000 as Weight) + (6_173_000 as Weight) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_546_000 as Weight) + (22_261_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -508,7 +508,7 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (48_243_000 as Weight) + (47_959_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index be7752ce113b4..3b9350f25040b 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -26,6 +26,7 @@ use pallet_nomination_pools::{ PoolState, }; use pallet_staking::{CurrentEra, Event as StakingEvent, Payee, RewardDestination}; +use sp_runtime::traits::Zero; #[test] fn pool_lifecycle_e2e() { @@ -296,7 +297,7 @@ fn pool_slash_e2e() { PoolMember { pool_id: 1, points: 0, - reward_pool_total_earnings: 0, + last_recorded_reward_counter: Zero::zero(), // the 10 points unlocked just now correspond to 5 points in the unbond pool. unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5) } @@ -351,7 +352,7 @@ fn pool_slash_e2e() { PoolMember { pool_id: 1, points: 0, - reward_pool_total_earnings: 0, + last_recorded_reward_counter: Zero::zero(), unbonding_eras: bounded_btree_map!(4 => 10, 5 => 10, 9 => 10) } ); diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index d13d8aa341c12..852b2c319f376 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -16,8 +16,17 @@ // limitations under the License. 
use frame_election_provider_support::VoteWeight; -use frame_support::{assert_ok, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; -use sp_runtime::traits::{Convert, IdentityLookup}; +use frame_support::{ + assert_ok, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, ConstU8}, + PalletId, +}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + FixedU128, +}; type AccountId = u128; type AccountIndex = u32; @@ -159,13 +168,15 @@ impl pallet_nomination_pools::Config for Runtime { type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; type StakingInterface = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; - type MinPointsToBalance = ConstU32<10>; + type MaxPointsToBalance = ConstU8<10>; type PalletId = PoolsPalletId; } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c09a33cf3f16a..f77f92c625c9d 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -472,9 +472,10 @@ pub enum State { #[clap(short, long)] snapshot_path: Option, - /// The pallets to scrape. If empty, entire chain state will be scraped. + /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will + /// be scraped. #[clap(short, long, multiple_values = true)] - pallets: Vec, + pallet: Vec, /// Fetch the child-keys as well. /// @@ -498,7 +499,7 @@ impl State { Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), })), - State::Live { snapshot_path, pallets, uri, at, child_tree } => { + State::Live { snapshot_path, pallet, uri, at, child_tree } => { let at = match at { Some(at_str) => Some(hash_of::(at_str)?), None => None, @@ -507,7 +508,7 @@ impl State { .mode(Mode::Online(OnlineConfig { transport: uri.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - pallets: pallets.clone(), + pallets: pallet.clone(), scrape_children: true, at, })) From 49b06901eb65f2c61ff0934d66987fd955d5b8f5 Mon Sep 17 00:00:00 2001 From: Emre Surmeli Date: Thu, 14 Jul 2022 04:21:21 -0400 Subject: [PATCH 035/135] Fix typo (#11832) --- frame/election-provider-multi-phase/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 2f1f6463df719..e1d3cb8ed5dee 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -51,7 +51,7 @@ //! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A //! deposit is reserved, based on the size of the solution, for the cost of keeping this solution //! on-chain for a number of blocks, and the potential weight of the solution upon being checked. A -//! maximum of `pallet::Config::MaxSignedSubmissions` solutions are stored. The queue is always +//! maximum of `pallet::Config::SignedMaxSubmissions` solutions are stored. The queue is always //! sorted based on score (worse to best). //! //! 
Upon arrival of a new solution: From 1f0253c4c5fe70213fc2a53e78b5e9648e71d2c7 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 14 Jul 2022 10:57:59 +0100 Subject: [PATCH 036/135] Fix nomination pools unbonding logic (#11746) * make pool roles optional * undo lock file changes? * add migration * add the ability for pools to chill themselves * boilerplate of tests * somewhat stable, but I think I found another bug as well * Fix it all * Add more more sophisticated test + capture one more bug. * Update frame/staking/src/lib.rs * reduce the diff a little bit * add some test for the slashing bug * cleanup * fix lock file? * Fix * fmt * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/mock.rs Co-authored-by: Oliver Tale-Yazdi * Fix build * fix some fishy tests.. * add one last integrity check for MinCreateBond * remove bad assertion -- needs to be dealt with later * nits * fix tests and add benchmarks for chill * remove stuff * fix benchmarks * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_nomination_pools --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/nomination-pools/src/weights.rs --template=./.maintain/frame-weight-template.hbs * remove defensive * first working version * bring back all tests * ALL new tests work now * cleanup * make sure benchmarks and all work * cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark pallet --chain=dev --steps=50 --repeat=20 --pallet=pallet_nomination_pools --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/nomination-pools/src/weights.rs --template=./.maintain/frame-weight-template.hbs * round of self-review, make arithmetic safe * fix warn * add migration code * Fix doc * add precision notes * make arithmetic fallible * fix node runtime * a lot of precision tests and notes and stuff * document MaxPOintsToBalance better * :round of self-review * fmt * fix some comments * new logic, some broken tests * Check if after unbonding remaining balance is more or equal to MinJoinBond and is not zero * incorporate nikos' work * make it work again * merge * Fix all tests * fix all tests * some updates * Add tests * remove erroneoysly placed comment * Try to make lint pass * Try to make lint pass * revamp the tests for unbond * fix docs * Fix proportional slashing logic * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * track poinst in migration * fix * fmt * fix migration * remove event read * Apply suggestions from code review * Update frame/nomination-pools/src/lib.rs Co-authored-by: Shawn Tabrizi * remove log * Update frame/staking/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/nomination-pools/src/lib.rs Co-authored-by: Shawn Tabrizi * update * fmt * fmt * add one last test * fmrt * Update frame/nomination-pools/src/lib.rs Co-authored-by: Oliver Tale-Yazdi 
* Update frame/nomination-pools/src/tests.rs

Co-authored-by: Oliver Tale-Yazdi

Co-authored-by: Oliver Tale-Yazdi
Co-authored-by: Shawn Tabrizi
Co-authored-by: Parity Bot
Co-authored-by: wirednkod
---
 frame/nomination-pools/src/lib.rs             | 135 +++--
 frame/nomination-pools/src/mock.rs            |  16 +-
 frame/nomination-pools/src/tests.rs           | 480 ++++++++++++++----
 .../nomination-pools/test-staking/src/lib.rs  |   6 +-
 4 files changed, 464 insertions(+), 173 deletions(-)

diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs
index 53e1c48a5e39e..514267e8bf4af 100644
--- a/frame/nomination-pools/src/lib.rs
+++ b/frame/nomination-pools/src/lib.rs
@@ -469,7 +469,7 @@ impl<T: Config> PoolMember<T> {
self.points = new_points;
Ok(())
} else {
- Err(Error::<T>::NotEnoughPointsToUnbond)
+ Err(Error::<T>::MinimumBondNotMet)
}
}
@@ -781,45 +781,60 @@ impl<T: Config> BondedPool<T> {
let is_depositor = *target_account == self.roles.depositor;
let is_full_unbond = unbonding_points == target_member.active_points();
+ let balance_after_unbond = {
+ let new_depositor_points =
+ target_member.active_points().saturating_sub(unbonding_points);
+ let mut target_member_after_unbond = (*target_member).clone();
+ target_member_after_unbond.points = new_depositor_points;
+ target_member_after_unbond.active_balance()
+ };
+
// any partial unbonding is only ever allowed if this unbond is permissioned.
ensure!(
is_permissioned || is_full_unbond,
Error::<T>::PartialUnbondNotAllowedPermissionlessly
);
+ // any unbond must comply with the balance condition:
+ ensure!(
+ is_full_unbond ||
+ balance_after_unbond >=
+ if is_depositor {
+ Pallet::<T>::depositor_min_bond()
+ } else {
+ MinJoinBond::<T>::get()
+ },
+ Error::<T>::MinimumBondNotMet
+ );
+
+ // additional checks:
match (is_permissioned, is_depositor) {
- // If the pool is blocked, then an admin with kicking permissions can remove a
- // member. If the pool is being destroyed, anyone can remove a member
+ (true, false) => (),
+ (true, true) => {
+ // permissioned depositor unbond: if destroying and pool is empty, always allowed,
+ // with no additional limits.
+ if self.is_destroying_and_only_depositor(target_member.active_points()) {
+ // everything good, let them unbond anything.
+ } else {
+ // depositor cannot fully unbond yet.
+ ensure!(!is_full_unbond, Error::<T>::MinimumBondNotMet);
+ }
+ },
(false, false) => {
+ // If the pool is blocked, then an admin with kicking permissions can remove a
+ // member. If the pool is being destroyed, anyone can remove a member
+ debug_assert!(is_full_unbond);
ensure!(
self.can_kick(caller) || self.is_destroying(),
Error::<T>::NotKickerOrDestroying
)
},
- // Any member who is not the depositor can always unbond themselves
- (true, false) => (),
- (_, true) => {
- if self.is_destroying_and_only_depositor(target_member.active_points()) {
- // if the pool is about to be destroyed, anyone can unbond the depositor, and
- // they can fully unbond.
- } else {
- // only the depositor can partially unbond, and they can only unbond up to the
- // threshold.
- ensure!(is_permissioned, Error::::DoesNotHavePermission); - let balance_after_unbond = { - let new_depositor_points = - target_member.active_points().saturating_sub(unbonding_points); - let mut depositor_after_unbond = (*target_member).clone(); - depositor_after_unbond.points = new_depositor_points; - depositor_after_unbond.active_balance() - }; - ensure!( - balance_after_unbond >= MinCreateBond::::get(), - Error::::NotOnlyPoolMember - ); - } + (false, true) => { + // the depositor can simply not be unbonded permissionlessly, period. + return Err(Error::::DoesNotHavePermission.into()) }, }; + Ok(()) } @@ -830,25 +845,14 @@ impl BondedPool { &self, caller: &T::AccountId, target_account: &T::AccountId, - target_member: &PoolMember, - sub_pools: &SubPools, ) -> Result<(), DispatchError> { - if *target_account == self.roles.depositor { - ensure!( - sub_pools.sum_unbonding_points() == target_member.unbonding_points(), - Error::::NotOnlyPoolMember - ); - debug_assert_eq!(self.member_counter, 1, "only member must exist at this point"); - Ok(()) - } else { - // This isn't a depositor - let is_permissioned = caller == target_account; - ensure!( - is_permissioned || self.can_kick(caller) || self.is_destroying(), - Error::::NotKickerOrDestroying - ); - Ok(()) - } + // This isn't a depositor + let is_permissioned = caller == target_account; + ensure!( + is_permissioned || self.can_kick(caller) || self.is_destroying(), + Error::::NotKickerOrDestroying + ); + Ok(()) } /// Bond exactly `amount` from `who`'s funds into this pool. @@ -1100,15 +1104,6 @@ impl SubPools { self } - /// The sum of all unbonding points, regardless of whether they are actually unlocked or not. - fn sum_unbonding_points(&self) -> BalanceOf { - self.no_era.points.saturating_add( - self.with_era - .values() - .fold(BalanceOf::::zero(), |acc, pool| acc.saturating_add(pool.points)), - ) - } - /// The sum of all unbonding balance, regardless of whether they are actually unlocked or not. #[cfg(any(test, debug_assertions))] fn sum_unbonding_balance(&self) -> BalanceOf { @@ -1419,15 +1414,16 @@ pub mod pallet { /// None of the funds can be withdrawn yet because the bonding duration has not passed. CannotWithdrawAny, /// The amount does not meet the minimum bond to either join or create a pool. + /// + /// The depositor can never unbond to a value less than + /// `Pallet::depositor_min_bond`. The caller does not have nominating + /// permissions for the pool. Members can never unbond to a value below `MinJoinBond`. MinimumBondNotMet, /// The transaction could not be executed due to overflow risk for the pool. OverflowRisk, /// A pool must be in [`PoolState::Destroying`] in order for the depositor to unbond or for /// other members to be permissionlessly unbonded. NotDestroying, - /// The depositor must be the only member in the bonded pool in order to unbond. And the - /// depositor must be the only member in the sub pools in order to withdraw unbonded. - NotOnlyPoolMember, /// The caller does not have nominating permissions for the pool. NotNominator, /// Either a) the caller cannot make a valid kick or b) the pool is not destroying. @@ -1447,8 +1443,6 @@ pub mod pallet { /// Some error occurred that should never happen. This should be reported to the /// maintainers. Defensive(DefensiveError), - /// Not enough points. Ty unbonding less. - NotEnoughPointsToUnbond, /// Partial unbonding now allowed permissionlessly. 
PartialUnbondNotAllowedPermissionlessly, } @@ -1758,12 +1752,7 @@ pub mod pallet { let mut sub_pools = SubPoolsStorage::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::SubPoolsNotFound.into())?; - bonded_pool.ok_to_withdraw_unbonded_with( - &caller, - &member_account, - &member, - &sub_pools, - )?; + bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; // NOTE: must do this after we have done the `ok_to_withdraw_unbonded_other_with` check. let withdrawn_points = member.withdraw_unlocked(current_era); @@ -1878,13 +1867,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!( - amount >= - T::StakingInterface::minimum_bond() - .max(MinCreateBond::::get()) - .max(MinJoinBond::::get()), - Error::::MinimumBondNotMet - ); + ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); ensure!( MaxPools::::get() .map_or(true, |max_pools| BondedPools::::count() < max_pools), @@ -2162,6 +2145,18 @@ pub mod pallet { } impl Pallet { + /// The amount of bond that MUST REMAIN IN BONDED in ALL POOLS. + /// + /// It is the responsibility of the depositor to put these funds into the pool initially. Upon + /// unbond, they can never unbond to a value below this amount. + /// + /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former + /// is coming from the staking pallet and the latter two are configured in this pallet. + fn depositor_min_bond() -> BalanceOf { + T::StakingInterface::minimum_bond() + .max(MinCreateBond::::get()) + .max(MinJoinBond::::get()) + } /// Remove everything related to the given bonded pool. /// /// All sub-pools are also deleted. All accounts are dusted and the leftover of the reward diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 042a0b666efb1..5138c55afccac 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -22,6 +22,7 @@ pub fn default_reward_account() -> AccountId { } parameter_types! 
{ + pub static MinJoinBondConfig: Balance = 2; pub static CurrentEra: EraIndex = 0; pub static BondingDuration: EraIndex = 3; pub storage BondedBalanceMap: BTreeMap = Default::default(); @@ -245,6 +246,11 @@ impl ExtBuilder { self } + pub(crate) fn min_join_bond(self, min: Balance) -> Self { + MinJoinBondConfig::set(min); + self + } + pub(crate) fn with_check(self, level: u8) -> Self { CheckLevel::set(level); self @@ -261,11 +267,12 @@ impl ExtBuilder { } pub(crate) fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); let _ = crate::GenesisConfig:: { - min_join_bond: 2, + min_join_bond: MinJoinBondConfig::get(), min_create_bond: 2, max_pools: Some(2), max_members_per_pool: self.max_members_per_pool, @@ -280,8 +287,8 @@ impl ExtBuilder { frame_system::Pallet::::set_block_number(1); // make a pool - let amount_to_bond = ::StakingInterface::minimum_bond(); - Balances::make_free_balance_be(&10, amount_to_bond * 2); + let amount_to_bond = Pools::depositor_min_bond(); + Balances::make_free_balance_be(&10, amount_to_bond * 5); assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); let last_pool = LastPoolId::::get(); @@ -302,12 +309,13 @@ impl ExtBuilder { } } -pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) -> Result<(), ()> { +pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) { BondedPools::::try_mutate(pool_id, |maybe_bonded_pool| { maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { bonded_pool.state = state; }) }) + .unwrap() } parameter_types! { diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 9989893d86462..c971239bef507 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -538,13 +538,13 @@ mod join { StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); // Cannot join a pool that isn't open - unsafe_set_state(123, PoolState::Blocked).unwrap(); + unsafe_set_state(123, PoolState::Blocked); assert_noop!( Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); - unsafe_set_state(123, PoolState::Destroying).unwrap(); + unsafe_set_state(123, PoolState::Destroying); assert_noop!( Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen @@ -1824,7 +1824,8 @@ mod claim_payout { fn rewards_are_rounded_down_depositor_collects_them() { ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| { // initial balance of 10. - assert_eq!(Balances::free_balance(&10), 5); + + assert_eq!(Balances::free_balance(&10), 35); assert_eq!( Balances::free_balance(&default_reward_account()), Balances::minimum_balance() @@ -1875,7 +1876,7 @@ mod claim_payout { ); // original ed + ed put into reward account + reward + bond + dust. 
- assert_eq!(Balances::free_balance(&10), 5 + 5 + 13 + 10 + 1); + assert_eq!(Balances::free_balance(&10), 35 + 5 + 13 + 10 + 1); }) } @@ -1942,10 +1943,269 @@ mod claim_payout { mod unbond { use super::*; + #[test] + fn member_unbond_open() { + // depositor in pool, pool state open + // - member unbond above limit + // - member unbonds to 0 + // - member cannot unbond between within limit and 0 + ExtBuilder::default() + .min_join_bond(10) + .add_members(vec![(20, 20)]) + .build_and_execute(|| { + // can unbond to above limit + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 15); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 5); + + // cannot go to below 10: + assert_noop!( + Pools::unbond(Origin::signed(20), 20, 10), + Error::::MinimumBondNotMet + ); + + // but can go to 0 + assert_ok!(Pools::unbond(Origin::signed(20), 20, 15)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); + }) + } + + #[test] + fn member_kicked() { + // depositor in pool, pool state blocked + // - member cannot be kicked to above limit + // - member cannot be kicked between within limit and 0 + // - member kicked to 0 + ExtBuilder::default() + .min_join_bond(10) + .add_members(vec![(20, 20)]) + .build_and_execute(|| { + unsafe_set_state(1, PoolState::Blocked); + let kicker = DEFAULT_ROLES.state_toggler.unwrap(); + + // cannot be kicked to above the limit. + assert_noop!( + Pools::unbond(Origin::signed(kicker), 20, 5), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // cannot go to below 10: + assert_noop!( + Pools::unbond(Origin::signed(kicker), 20, 15), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // but they themselves can do an unbond + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); + + // can be kicked to 0. + assert_ok!(Pools::unbond(Origin::signed(kicker), 20, 18)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); + }) + } + + #[test] + fn member_unbond_destroying() { + // depositor in pool, pool state destroying + // - member cannot be permissionlessly unbonded to above limit + // - member cannot be permissionlessly unbonded between within limit and 0 + // - member permissionlessly unbonded to 0 + ExtBuilder::default() + .min_join_bond(10) + .add_members(vec![(20, 20)]) + .build_and_execute(|| { + unsafe_set_state(1, PoolState::Destroying); + let random = 123; + + // cannot be kicked to above the limit. 
+ assert_noop!( + Pools::unbond(Origin::signed(random), 20, 5), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // cannot go to below 10: + assert_noop!( + Pools::unbond(Origin::signed(random), 20, 15), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // but they themselves can do an unbond + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); + + // but can go to 0 + assert_ok!(Pools::unbond(Origin::signed(random), 20, 18)); + assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); + }) + } + + #[test] + fn depositor_unbond_open() { + // depositor in pool, pool state open + // - depositor unbonds to above limit + // - depositor cannot unbond to below limit or 0 + ExtBuilder::default().min_join_bond(10).build_and_execute(|| { + // give the depositor some extra funds. + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_eq!(PoolMembers::::get(10).unwrap().points, 20); + + // can unbond to above the limit. + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); + assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); + + // cannot go to below 10: + assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); + + // cannot go to 0 either. + assert_noop!(Pools::unbond(Origin::signed(10), 10, 15), Error::::MinimumBondNotMet); + }) + } + + #[test] + fn depositor_kick() { + // depositor in pool, pool state blocked + // - depositor can never be kicked. + ExtBuilder::default().min_join_bond(10).build_and_execute(|| { + // give the depositor some extra funds. + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_eq!(PoolMembers::::get(10).unwrap().points, 20); + + // set the stage + unsafe_set_state(1, PoolState::Blocked); + let kicker = DEFAULT_ROLES.state_toggler.unwrap(); + + // cannot be kicked to above limit. + assert_noop!( + Pools::unbond(Origin::signed(kicker), 10, 5), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // or below the limit + assert_noop!( + Pools::unbond(Origin::signed(kicker), 10, 15), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // or 0. + assert_noop!( + Pools::unbond(Origin::signed(kicker), 10, 20), + Error::::DoesNotHavePermission + ); + + // they themselves cannot do it either + assert_noop!(Pools::unbond(Origin::signed(10), 10, 20), Error::::MinimumBondNotMet); + }) + } + + #[test] + fn depositor_unbond_destroying_permissionless() { + // depositor can never be permissionlessly unbonded. + ExtBuilder::default().min_join_bond(10).build_and_execute(|| { + // give the depositor some extra funds. + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_eq!(PoolMembers::::get(10).unwrap().points, 20); + + // set the stage + unsafe_set_state(1, PoolState::Destroying); + let random = 123; + + // cannot be kicked to above limit. + assert_noop!( + Pools::unbond(Origin::signed(random), 10, 5), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // or below the limit + assert_noop!( + Pools::unbond(Origin::signed(random), 10, 15), + Error::::PartialUnbondNotAllowedPermissionlessly + ); + + // or 0. 
+ assert_noop!( + Pools::unbond(Origin::signed(random), 10, 20), + Error::::DoesNotHavePermission + ); + + // they themselves can do it in this case though. + assert_ok!(Pools::unbond(Origin::signed(10), 10, 20)); + }) + } + + #[test] + fn depositor_unbond_destroying_not_last_member() { + // deposit in pool, pool state destroying + // - depositor can never leave if there is another member in the pool. + ExtBuilder::default() + .min_join_bond(10) + .add_members(vec![(20, 20)]) + .build_and_execute(|| { + // give the depositor some extra funds. + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_eq!(PoolMembers::::get(10).unwrap().points, 20); + + // set the stage + unsafe_set_state(1, PoolState::Destroying); + + // can go above the limit + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); + assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); + + // but not below the limit + assert_noop!( + Pools::unbond(Origin::signed(10), 10, 10), + Error::::MinimumBondNotMet + ); + + // and certainly not zero + assert_noop!( + Pools::unbond(Origin::signed(10), 10, 15), + Error::::MinimumBondNotMet + ); + }) + } + + #[test] + fn depositor_unbond_destroying_last_member() { + // deposit in pool, pool state destroying + // - depositor can unbond to above limit always. + // - depositor cannot unbond to below limit if last. + // - depositor can unbond to 0 if last and destroying. + ExtBuilder::default().min_join_bond(10).build_and_execute(|| { + // give the depositor some extra funds. + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_eq!(PoolMembers::::get(10).unwrap().points, 20); + + // set the stage + unsafe_set_state(1, PoolState::Destroying); + + // can unbond to above the limit. + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); + assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); + + // still cannot go to below limit + assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); + + // can go to 0 too. 
+ assert_ok!(Pools::unbond(Origin::signed(10), 10, 15)); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); + assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 20); + }) + } + #[test] fn unbond_of_1_works() { ExtBuilder::default().build_and_execute(|| { - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); assert_eq!( @@ -2021,7 +2281,7 @@ mod unbond { assert_eq!(Balances::free_balance(&40), 40 + 40); // We claim rewards when unbonding // When - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(550)); // Then @@ -2111,7 +2371,7 @@ mod unbond { }, }, ); - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // When let current_era = 1 + TotalUnbondingPools::::get(); @@ -2148,7 +2408,7 @@ mod unbond { .add_members(vec![(100, 100), (200, 200)]) .build_and_execute(|| { // Given - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); let bonded_pool = BondedPool::::get(1).unwrap(); assert_eq!(bonded_pool.roles.root.unwrap(), 900); assert_eq!(bonded_pool.roles.nominator.unwrap(), 901); @@ -2216,7 +2476,7 @@ mod unbond { // Scenarios where non-admin accounts can unbond others ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // Given the pool is blocked - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); // A permissionless unbond attempt errors assert_noop!( @@ -2231,16 +2491,17 @@ mod unbond { ); // Given the pool is destroying - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // The depositor cannot be fully unbonded until they are the last member assert_noop!( Pools::fully_unbond(Origin::signed(10), 10), - Error::::NotOnlyPoolMember + Error::::MinimumBondNotMet, ); // Any account can unbond a member that is not the depositor assert_ok!(Pools::fully_unbond(Origin::signed(420), 100)); + assert_eq!( pool_events_since_last_call(), vec![ @@ -2258,7 +2519,7 @@ mod unbond { ); // Given the pool is blocked - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); // The depositor cannot be unbonded assert_noop!( @@ -2267,7 +2528,7 @@ mod unbond { ); // Given the pools is destroying - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // The depositor cannot be unbonded yet. assert_noop!( @@ -2285,8 +2546,13 @@ mod unbond { Error::::PartialUnbondNotAllowedPermissionlessly, ); - // but full unbond works. - assert_ok!(Pools::fully_unbond(Origin::signed(420), 10)); + // depositor can never be unbonded permissionlessly . + assert_noop!( + Pools::fully_unbond(Origin::signed(420), 10), + Error::::DoesNotHavePermission + ); + // but depositor itself can do it. + assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); assert_eq!(BondedPools::::get(1).unwrap().points, 0); assert_eq!( @@ -2346,6 +2612,12 @@ mod unbond { #[test] fn partial_unbond_era_tracking() { ExtBuilder::default().build_and_execute(|| { + // to make the depositor capable of withdrawing. 
+ StakingMinBond::set(1); + MinCreateBond::::set(1); + MinJoinBond::::set(1); + assert_eq!(Pools::depositor_min_bond(), 1); + // given assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 10); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0); @@ -2360,7 +2632,7 @@ mod unbond { assert_eq!(BondingDuration::get(), 3); // so the depositor can leave, just keeps the test simpler. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // when: casual unbond assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); @@ -2444,13 +2716,13 @@ mod unbond { ); // when: unbonding more than our active: error - assert_err!( + assert_noop!( frame_support::storage::in_storage_layer(|| Pools::unbond( Origin::signed(10), 10, 5 )), - Error::::NotEnoughPointsToUnbond + Error::::MinimumBondNotMet ); // instead: assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); @@ -2482,26 +2754,24 @@ mod unbond { #[test] fn partial_unbond_max_chunks() { - ExtBuilder::default().ed(1).build_and_execute(|| { - // so the depositor can leave, just keeps the test simpler. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + ExtBuilder::default().add_members(vec![(20, 20)]).ed(1).build_and_execute(|| { MaxUnbonding::set(2); // given - assert_ok!(Pools::unbond(Origin::signed(10), 10, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3) ); // when CurrentEra::set(2); - assert_err!( + assert_noop!( frame_support::storage::in_storage_layer(|| Pools::unbond( - Origin::signed(10), - 10, + Origin::signed(20), + 20, 4 )), Error::::MaxUnbondingLimit @@ -2509,30 +2779,35 @@ mod unbond { // when MaxUnbonding::set(3); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 1)); + assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3, 5 => 1) ); + assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }, - Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 } + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 3, points: 3 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 1, points: 1 } ] ); }) } - // depositor can unbond inly up to `MinCreateBond`. + // depositor can unbond only up to `MinCreateBond`. 
#[test] fn depositor_permissioned_partial_unbond() { ExtBuilder::default().ed(1).build_and_execute(|| { // given - assert_eq!(MinCreateBond::::get(), 2); + StakingMinBond::set(5); + assert_eq!(Pools::depositor_min_bond(), 5); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 10); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0); @@ -2544,7 +2819,7 @@ mod unbond { // but not less than 2 assert_noop!( Pools::unbond(Origin::signed(10), 10, 6), - Error::::NotOnlyPoolMember + Error::::MinimumBondNotMet ); assert_eq!( @@ -2558,7 +2833,6 @@ mod unbond { }); } - // same as above, but the pool is slashed and therefore the depositor cannot partially unbond. #[test] fn depositor_permissioned_partial_unbond_slashed() { ExtBuilder::default().ed(1).build_and_execute(|| { @@ -2573,78 +2847,69 @@ mod unbond { // cannot unbond even 7, because the value of shares is now less. assert_noop!( Pools::unbond(Origin::signed(10), 10, 7), - Error::::NotOnlyPoolMember - ); - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Created { depositor: 10, pool_id: 1 }, - Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - ] + Error::::MinimumBondNotMet ); }); } #[test] fn every_unbonding_triggers_payout() { - ExtBuilder::default().build_and_execute(|| { - let initial_reward_account = Balances::free_balance(Pools::create_reward_account(1)); + ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| { + let initial_reward_account = Balances::free_balance(default_reward_account()); assert_eq!(initial_reward_account, Balances::minimum_balance()); assert_eq!(initial_reward_account, 5); - // set the pool to destroying so that depositor can leave. - unsafe_set_state(1, PoolState::Destroying).unwrap(); - Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); assert_eq!( pool_events_since_last_call(), vec![ + // 2/3 of ed, which is 20's share. Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - // exactly equal to ed, all that can be claimed. - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 } + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, + Event::PaidOut { member: 20, pool_id: 1, payout: 10 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2 } ] ); CurrentEra::set(1); Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( pool_events_since_last_call(), vec![ - // exactly equal to ed, all that can be claimed. - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 } + // 2/3 of ed, which is 20's share. 
+ Event::PaidOut { member: 20, pool_id: 1, payout: 6 }, + Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3 } ] ); CurrentEra::set(2); Balances::make_free_balance_be( - &Pools::create_reward_account(1), - 2 * Balances::minimum_balance(), + &default_reward_account(), + 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); assert_eq!( pool_events_since_last_call(), vec![ - Event::PaidOut { member: 10, pool_id: 1, payout: 5 }, - Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 } + Event::PaidOut { member: 20, pool_id: 1, payout: 3 }, + Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5 } ] ); assert_eq!( - PoolMembers::::get(10).unwrap().unbonding_eras, + PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3, 5 => 5) ); }); @@ -2801,7 +3066,7 @@ mod withdraw_unbonded { ); // now, finally, the depositor can take out its share. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); current_era += 3; @@ -2911,7 +3176,7 @@ mod withdraw_unbonded { assert!(SubPoolsStorage::::get(&1).unwrap().with_era.is_empty()); // now, finally, the depositor can take out its share. - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(fully_unbond_permissioned(10)); // because everyone else has left, the points @@ -2926,7 +3191,7 @@ mod withdraw_unbonded { assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // then - assert_eq!(Balances::free_balance(&10), 10 + 5); + assert_eq!(Balances::free_balance(&10), 10 + 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 0); // in this test 10 also gets a fair share of the slash, because the slash was @@ -2955,9 +3220,9 @@ mod withdraw_unbonded { ExtBuilder::default().build_and_execute(|| { // Given assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Balances::free_balance(&10), 5); + assert_eq!(Balances::free_balance(&10), 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); // Simulate a slash that is not accounted for in the sub pools. @@ -2974,7 +3239,7 @@ mod withdraw_unbonded { assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // Then - assert_eq!(Balances::free_balance(10), 10 + 5); + assert_eq!(Balances::free_balance(10), 10 + 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 0); }); } @@ -3054,7 +3319,7 @@ mod withdraw_unbonded { ); // Given - unsafe_set_state(1, PoolState::Blocked).unwrap(); + unsafe_set_state(1, PoolState::Blocked); // Cannot kick as a nominator assert_noop!( @@ -3112,7 +3377,7 @@ mod withdraw_unbonded { ); // Given - unsafe_set_state(1, PoolState::Destroying).unwrap(); + unsafe_set_state(1, PoolState::Destroying); // Can permissionlesly withdraw a member that is not the depositor assert_ok!(Pools::withdraw_unbonded(Origin::signed(420), 100, 0)); @@ -3137,8 +3402,8 @@ mod withdraw_unbonded { #[test] fn partial_withdraw_unbonded_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { - // so the depositor can leave, just keeps the test simpler. 
- unsafe_set_state(1, PoolState::Destroying).unwrap(); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + unsafe_set_state(1, PoolState::Destroying); // given assert_ok!(Pools::unbond(Origin::signed(10), 10, 6)); @@ -3158,13 +3423,14 @@ mod withdraw_unbonded { } } ); - assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 3); + assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 13); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 7); assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false }, Event::Unbonded { member: 10, pool_id: 1, points: 6, balance: 6 }, Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 } ] @@ -3368,50 +3634,72 @@ mod withdraw_unbonded { #[test] fn full_multi_step_withdrawing_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { - // given + // depositor now has 20, they can unbond to 10. + assert_eq!(Pools::depositor_min_bond(), 10); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + + // now they can. assert_ok!(Pools::unbond(Origin::signed(10), 10, 7)); // progress one era and unbond the leftover. CurrentEra::set(1); - unsafe_set_state(1, PoolState::Destroying).unwrap(); assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, member_unbonding_eras!(3 => 7, 4 => 3) ); + // they can't unbond to a value below 10 other than 0.. assert_noop!( - Pools::withdraw_unbonded(Origin::signed(10), 10, 0), - Error::::CannotWithdrawAny + Pools::unbond(Origin::signed(10), 10, 5), + Error::::MinimumBondNotMet ); + // but not even full, because they pool is not yet destroying. + assert_noop!( + Pools::unbond(Origin::signed(10), 10, 10), + Error::::MinimumBondNotMet + ); + + // but now they can. + unsafe_set_state(1, PoolState::Destroying); + assert_noop!( + Pools::unbond(Origin::signed(10), 10, 5), + Error::::MinimumBondNotMet + ); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); + // now the 7 should be free. CurrentEra::set(3); assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 7, balance: 7 }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }, - Event::Withdrawn { member: 10, pool_id: 1, points: 7, balance: 7 } + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false }, + Event::Unbonded { member: 10, pool_id: 1, balance: 7, points: 7 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 3, points: 3 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, + Event::Withdrawn { member: 10, pool_id: 1, balance: 7, points: 7 } ] ); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, - member_unbonding_eras!(4 => 3) + member_unbonding_eras!(4 => 13) ); - // the 25 should be free now, and the member removed. + // the 13 should be free now, and the member removed. 
CurrentEra::set(4); assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_eq!( pool_events_since_last_call(), vec![ - Event::Withdrawn { member: 10, pool_id: 1, points: 3, balance: 3 }, + Event::Withdrawn { member: 10, pool_id: 1, points: 13, balance: 13 }, Event::MemberRemoved { pool_id: 1, member: 10 }, - // the pool is also destroyed now. Event::Destroyed { pool_id: 1 }, ] ); @@ -3640,7 +3928,7 @@ mod set_state { // If the pool is not ok to be open, then anyone can set it to destroying // Given - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); let mut bonded_pool = BondedPool::::get(1).unwrap(); bonded_pool.points = 100; bonded_pool.put(); @@ -3651,7 +3939,7 @@ mod set_state { // Given Balances::make_free_balance_be(&default_bonded_account(), Balance::max_value() / 10); - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); // When assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); // Then @@ -3659,7 +3947,7 @@ mod set_state { // If the pool is not ok to be open, it cannot be permissionleslly set to a state that // isn't destroying - unsafe_set_state(1, PoolState::Open).unwrap(); + unsafe_set_state(1, PoolState::Open); assert_noop!( Pools::set_state(Origin::signed(11), 1, PoolState::Blocked), Error::::CanNotChangeState @@ -3820,13 +4108,13 @@ mod bond_extra { // given assert_eq!(PoolMembers::::get(10).unwrap().points, 10); assert_eq!(BondedPools::::get(1).unwrap().points, 10); - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); // when assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); // then - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); assert_eq!(PoolMembers::::get(10).unwrap().points, 10 + claimable_reward); assert_eq!(BondedPools::::get(1).unwrap().points, 10 + claimable_reward); @@ -3862,14 +4150,14 @@ mod bond_extra { assert_eq!(PoolMembers::::get(10).unwrap().points, 10); assert_eq!(PoolMembers::::get(20).unwrap().points, 20); assert_eq!(BondedPools::::get(1).unwrap().points, 30); - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); assert_eq!(Balances::free_balance(20), 20); // when assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); // then - assert_eq!(Balances::free_balance(10), 5); + assert_eq!(Balances::free_balance(10), 35); // 10's share of the reward is 1/3, since they gave 10/30 of the total shares. assert_eq!(PoolMembers::::get(10).unwrap().points, 10 + 1); assert_eq!(BondedPools::::get(1).unwrap().points, 30 + 1); diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 3b9350f25040b..eed8e5fd390cd 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -72,7 +72,7 @@ fn pool_lifecycle_e2e() { // depositor cannot unbond yet. assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); // now the members want to unbond. 
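For reference, the unbonding checks introduced by this patch all reduce to one balance condition: a full unbond is always acceptable (subject to the separate permission and pool-state checks), while a partial unbond must leave at least `MinJoinBond` behind for a plain member, or `depositor_min_bond()` (the max of the staking pallet's minimum bond, `MinCreateBond` and `MinJoinBond`) for the depositor. A minimal standalone sketch of that condition, illustrative only and not part of the patch:

fn unbond_allowed(active: u128, unbonding: u128, min_remaining: u128) -> bool {
    // A full unbond is always fine from the balance perspective; the
    // permission and pool-state checks are handled elsewhere.
    let is_full_unbond = unbonding == active;
    // Otherwise, whatever stays behind must not drop below the threshold.
    is_full_unbond || active.saturating_sub(unbonding) >= min_remaining
}

fn main() {
    // With min_remaining = 10, as in the tests above: a member holding 20
    // points may unbond 5 (15 remain) or all 20, but not 15 (5 would remain).
    assert!(unbond_allowed(20, 5, 10));
    assert!(unbond_allowed(20, 20, 10));
    assert!(!unbond_allowed(20, 15, 10));
}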
@@ -103,7 +103,7 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); for e in 1..BondingDuration::get() { @@ -120,7 +120,7 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( Pools::unbond(Origin::signed(10), 10, 50), - PoolsError::::NotOnlyPoolMember, + PoolsError::::MinimumBondNotMet, ); // but members can now withdraw. From 37cca710eed3dadd4ed5364c7686608f5175cce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 14 Jul 2022 20:14:28 +0200 Subject: [PATCH 037/135] contracts: Composable `ChainExtension` (#11816) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add `RegisteredChainExtension` * Add tests * Update frame/contracts/src/chain_extension.rs Co-authored-by: Michael Müller * Add more docs * Remove debugging leftover * Make ChainExtension-registry lowercase * Apply suggestions from code review Co-authored-by: Hernando Castano * Improve clarity of test inputs Co-authored-by: Michael Müller Co-authored-by: Hernando Castano --- Cargo.lock | 1 + frame/contracts/Cargo.toml | 1 + frame/contracts/fixtures/chain_extension.wat | 10 +- frame/contracts/src/chain_extension.rs | 59 ++++++- frame/contracts/src/tests.rs | 175 +++++++++++++++++-- 5 files changed, 216 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b7c315be4390..791d8037f3335 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5505,6 +5505,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "impl-trait-for-tuples", "log", "pallet-balances", "pallet-contracts-primitives", diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index d27801df33bda..ac85c469354fe 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -26,6 +26,7 @@ smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } wasmi-validation = { version = "0.4", default-features = false } +impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate random contract code rand = { version = "0.8", optional = true, default-features = false } diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat index db7e83fd96b42..9b2534c540ab8 100644 --- a/frame/contracts/fixtures/chain_extension.wat +++ b/frame/contracts/fixtures/chain_extension.wat @@ -15,12 +15,12 @@ ) ;; [0, 4) len of input output - (data (i32.const 0) "\02") + (data (i32.const 0) "\08") ;; [4, 12) buffer for input - ;; [12, 16) len of output buffer - (data (i32.const 12) "\02") + ;; [12, 48) len of output buffer + (data (i32.const 12) "\20") ;; [16, inf) buffer for output @@ -31,7 +31,7 @@ ;; the chain extension passes through the input and returns it as output (call $seal_call_chain_extension - (i32.load8_u (i32.const 4)) ;; func_id + (i32.load (i32.const 4)) ;; func_id (i32.const 4) ;; input_ptr (i32.load (i32.const 0)) ;; input_len (i32.const 16) ;; output_ptr @@ -39,7 +39,7 @@ ) ;; the chain extension passes through the func_id - (call $assert (i32.eq (i32.load8_u (i32.const 4)))) + (call $assert (i32.eq (i32.load (i32.const 4)))) (call $seal_return (i32.const 0) (i32.const 16) (i32.load (i32.const 12))) ) diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index ed447719933be..536d58c94f68f 100644 --- a/frame/contracts/src/chain_extension.rs +++ 
b/frame/contracts/src/chain_extension.rs
@@ -29,6 +29,22 @@
 //! required for this endeavour are defined or re-exported in this module. There is an
 //! implementation on `()` which can be used to signal that no chain extension is available.
 //!
+//! # Using multiple chain extensions
+//!
+//! Often there is a need for having multiple chain extensions. This is the case when
+//! some generally useful off-the-shelf extensions should be included. To have multiple chain
+//! extensions, they can be put into a tuple which is then passed to [`Config::ChainExtension`] like
+//! this: `type Extensions = (ExtensionA, ExtensionB)`.
+//!
+//! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple.
+//! This is because the [`RegisteredChainExtension::ID`] is used to decide which of those extensions
+//! should be used when the contract calls a chain extension. Extensions which are generally
+//! useful should claim their `ID` with [the registry](https://github.com/paritytech/chainextension-registry)
+//! so that no collisions with other vendors will occur.
+//!
+//! **Chain-specific extensions must use the reserved `ID = 0` so that they can't be registered with
+//! the registry.**
+//!
 //! # Security
 //!
 //! The chain author alone is responsible for the security of the chain extension.
@@ -112,20 +128,51 @@ pub trait ChainExtension {
 	}
 }

-/// Implementation that indicates that no chain extension is available.
-impl ChainExtension for () {
-	fn call<E>(_func_id: u32, mut _env: Environment<E, InitState>) -> Result<RetVal>
+/// A [`ChainExtension`] that can be composed with other extensions using a tuple.
+///
+/// An extension that implements this trait can be put in a tuple in order to have multiple
+/// extensions available. The tuple implementation routes requests based on the two most
+/// significant bytes of the `func_id` passed to `call`.
+///
+/// If this extension is to be used by multiple runtimes, consider
+/// [registering it](https://github.com/paritytech/chainextension-registry) to ensure that there
+/// are no collisions with other vendors.
+///
+/// # Note
+///
+/// Currently, we support tuples of up to ten registered chain extensions. If more chain extensions
+/// are needed, consider opening an issue.
+pub trait RegisteredChainExtension: ChainExtension {
+	/// The extension's globally unique identifier.
+	const ID: u16;
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(10)]
+#[tuple_types_custom_trait_bound(RegisteredChainExtension)]
+impl ChainExtension for Tuple {
+	fn call<E>(func_id: u32, mut env: Environment<E, InitState>) -> Result<RetVal>
 	where
 		E: Ext,
 		<E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>,
 	{
-		// Never called since [`Self::enabled()`] is set to `false`. Because we want to
-		// avoid panics at all costs we supply a sensible error value here instead
-		// of an `unimplemented!`.
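// Illustrative sketch, not part of the patch: how a caller composes `func_id`
// for the tuple routing below. The upper 16 bits select the registered
// extension via `Tuple::ID`, while the lower 16 bits remain the
// extension-local function id (`extension_id` and `local_id` are
// hypothetical names):
//
//     let func_id: u32 = ((extension_id as u32) << 16) | (local_id as u32);
//     debug_assert_eq!((func_id >> 16) as u16, extension_id);
//
// This mirrors the `Input` helper used by the tests later in this patch.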
+ for_tuples!( + #( + if (Tuple::ID == (func_id >> 16) as u16) && Tuple::enabled() { + return Tuple::call(func_id, env); + } + )* + ); Err(Error::::NoChainExtension.into()) } fn enabled() -> bool { + for_tuples!( + #( + if Tuple::enabled() { + return true; + } + )* + ); false } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index bbac18142a658..85a0e9977d2d7 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -17,8 +17,8 @@ use crate::{ chain_extension::{ - ChainExtension, Environment, Ext, InitState, Result as ExtensionResult, RetVal, - ReturnFlags, SysConfig, UncheckedFrom, + ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, + Result as ExtensionResult, RetVal, ReturnFlags, SysConfig, UncheckedFrom, }, exec::{FixSizedKey, Frame}, storage::Storage, @@ -118,6 +118,10 @@ pub struct TestExtension { last_seen_inputs: (u32, u32, u32, u32), } +pub struct RevertingExtension; + +pub struct DisabledExtension; + impl TestExtension { fn disable() { TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) @@ -147,7 +151,7 @@ impl ChainExtension for TestExtension { match func_id { 0 => { let mut env = env.buf_in_buf_out(); - let input = env.read(2)?; + let input = env.read(8)?; env.write(&input, false, None)?; TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); Ok(RetVal::Converging(func_id)) @@ -162,7 +166,7 @@ impl ChainExtension for TestExtension { }, 2 => { let mut env = env.buf_in_buf_out(); - let weight = env.read(2)?[1].into(); + let weight = env.read(5)?[4].into(); env.charge_weight(weight)?; Ok(RetVal::Converging(func_id)) }, @@ -178,6 +182,46 @@ impl ChainExtension for TestExtension { } } +impl RegisteredChainExtension for TestExtension { + const ID: u16 = 0; +} + +impl ChainExtension for RevertingExtension { + fn call(_func_id: u32, _env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![0x4B, 0x1D] }) + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } +} + +impl RegisteredChainExtension for RevertingExtension { + const ID: u16 = 1; +} + +impl ChainExtension for DisabledExtension { + fn call(_func_id: u32, _env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + panic!("Disabled chain extensions are never called") + } + + fn enabled() -> bool { + false + } +} + +impl RegisteredChainExtension for DisabledExtension { + const ID: u16 = 2; +} + parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); @@ -281,7 +325,7 @@ impl Config for Test { type CallStack = [Frame; 31]; type WeightPrice = Self; type WeightInfo = (); - type ChainExtension = TestExtension; + type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension); type DeletionQueueDepth = ConstU32<1024>; type DeletionWeightLimit = ConstU64<500_000_000_000>; type Schedule = MySchedule; @@ -1523,6 +1567,23 @@ fn disabled_chain_extension_errors_on_call() { #[test] fn chain_extension_works() { + struct Input<'a> { + extension_id: u16, + func_id: u16, + extra: &'a [u8], + } + + impl<'a> From> for Vec { + fn from(input: Input) -> Vec { + ((input.extension_id as u32) << 16 | (input.func_id as u32)) + .to_le_bytes() + .iter() + .chain(input.extra) + .cloned() + .collect() + } + } + let (code, hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); @@ -1543,31 +1604,107 @@ fn chain_extension_works() { // func_id. // 0 = read input buffer and pass it through as output + let input: Vec = Input { extension_id: 0, func_id: 0, extra: &[99] }.into(); let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![0, 99], false); - let gas_consumed = result.gas_consumed; - assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); - assert_eq!(result.result.unwrap().data, Bytes(vec![0, 99])); + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); + assert_eq!(TestExtension::last_seen_buffer(), input); + assert_eq!(result.result.unwrap().data, Bytes(input)); // 1 = treat inputs as integer primitives and store the supplied integers - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![1], false) - .result - .unwrap(); + Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 0, func_id: 1, extra: &[] }.into(), + false, + ) + .result + .unwrap(); // those values passed in the fixture - assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); + assert_eq!(TestExtension::last_seen_inputs(), (4, 4, 16, 12)); - // 2 = charge some extra weight (amount supplied in second byte) - let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![2, 42], false); + // 2 = charge some extra weight (amount supplied in the fifth byte) + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 0, func_id: 2, extra: &[0] }.into(), + false, + ); + assert_ok!(result.result); + let gas_consumed = result.gas_consumed; + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 0, func_id: 2, extra: &[42] }.into(), + false, + ); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 42); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 0, func_id: 2, extra: &[95] }.into(), + false, + ); + assert_ok!(result.result); + assert_eq!(result.gas_consumed, gas_consumed + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![3], false) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 0, func_id: 3, extra: 
&[] }.into(), + false, + ) + .result + .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, Bytes(vec![42, 99])); + + // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer + // We set the MSB part to 1 (instead of 0) which routes the request into the second + // extension + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 1, func_id: 0, extra: &[] }.into(), + false, + ) + .result + .unwrap(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); + + // Diverging to third chain extension that is disabled + // We set the MSB part to 2 (instead of 0) which routes the request into the third extension + assert_err_ignore_postinfo!( + Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + Input { extension_id: 2, func_id: 0, extra: &[] }.into(), + ), + Error::::NoChainExtension, + ); }); } From 4400af779a733417c5bba098e30fd2e809af465f Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 15 Jul 2022 09:42:40 +0200 Subject: [PATCH 038/135] [ci] fix job `cancel-pipeline` (#11844) * [ci] fix cancel-pipeline job * test fail * remove debug --- .gitlab-ci.yml | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b4933333b3418..fba7b7a9d393f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -234,23 +234,42 @@ rusty-cachier-notify: # This job cancels the whole pipeline if any of provided jobs fail. # In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests # to fail the pipeline as soon as possible to shorten the feedback loop. 
-cancel-pipeline: +.cancel-pipeline-template: stage: .post + rules: + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + when: on_failure + variables: + PROJECT_ID: "${CI_PROJECT_ID}" + PIPELINE_ID: "${CI_PIPELINE_ID}" + trigger: "parity/infrastructure/ci_cd/pipeline-stopper" + +cancel-pipeline-test-linux-stable: + extends: .cancel-pipeline-template needs: - job: test-linux-stable artifacts: false + +cancel-pipeline-test-linux-stable-int: + extends: .cancel-pipeline-template + needs: - job: test-linux-stable-int artifacts: false + +cancel-pipeline-cargo-check-subkey: + extends: .cancel-pipeline-template + needs: - job: cargo-check-subkey artifacts: false + +cancel-pipeline-cargo-check-benches: + extends: .cancel-pipeline-template + needs: - job: cargo-check-benches artifacts: false + +cancel-pipeline-check-tracing: + extends: .cancel-pipeline-template + needs: - job: check-tracing artifacts: false - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: on_failure - variables: - PROJECT_ID: "${CI_PROJECT_ID}" - PIPELINE_ID: "${CI_PIPELINE_ID}" - trigger: "parity/infrastructure/ci_cd/pipeline-stopper" From 6eca5272deb30ca1abe9782ce8918bfb9047728c Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 15 Jul 2022 10:58:53 +0300 Subject: [PATCH 039/135] Speed up `rusty-cachier-notify` job (#11841) * Speed up `rusty-cachier-notify` job * Don't forget to setup `rusty-cachier` first --- .gitlab-ci.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fba7b7a9d393f..9e80277404549 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -223,10 +223,15 @@ deploy-prometheus-alerting-rules: # This job notifies rusty-cachier about the latest commit with the cache. # This info is later used for the cache distribution and an overlay creation. +# Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier. 
rusty-cachier-notify: stage: notify - extends: .docker-env + extends: .kubernetes-env + variables: + CI_IMAGE: paritytech/rusty-cachier-env:latest + GIT_STRATEGY: none script: + - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash - rusty-cachier cache notify #### stage: .post From 66d2023925e28d7bc880dad5d38b5a0b0ed03855 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 15 Jul 2022 10:44:46 +0200 Subject: [PATCH 040/135] [ci] improvments to make pipeline faster (#11829) * [DO NOT MERGE] test-linux-stable parallel on 3 ci nodes * add debug message * adjust rusty-cachier * empty commit * move test-linux-stable to test.yml * make cargo-check-benches and test-wasmer-sandbox parallel * fix comment * Update scripts/ci/gitlab/pipeline/test.yml Co-authored-by: Denis Pisarev * Update scripts/ci/gitlab/pipeline/test.yml Co-authored-by: Denis Pisarev * if to case * use case instead if in cargo-check-benches * format * add comments to output * add comment * add quotes * Update scripts/ci/gitlab/pipeline/test.yml Co-authored-by: Denis Pisarev Co-authored-by: parity-processbot <> Co-authored-by: Denis Pisarev --- .gitlab-ci.yml | 1 - scripts/ci/gitlab/pipeline/test.yml | 58 ++++++++++++++++++++++------- 2 files changed, 44 insertions(+), 15 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9e80277404549..fff11e07326d8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -196,7 +196,6 @@ include: # publish jobs - scripts/ci/gitlab/pipeline/publish.yml - #### stage: deploy deploy-prometheus-alerting-rules: diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index cc167410f94a4..dcec896ddc18b 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -74,7 +74,9 @@ cargo-check-nixos: cargo-check-benches: stage: test variables: - RUSTY_CACHIER_TOOLCHAIN: nightly + # Override to use nightly toolchain + RUSTY_CACHIER_TOOLCHAIN: "nightly" + CI_JOB_NAME: "cargo-check-benches" extends: - .docker-env - .test-refs @@ -91,16 +93,25 @@ cargo-check-benches: git config user.email "ci@gitlab.parity.io"; git merge $CI_COMMIT_REF_NAME --verbose --no-edit; fi + parallel: 2 script: - rusty-cachier snapshot create - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA - - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all - - 'cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json - | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json' - - 'cargo run --release -p node-bench -- ::trie::read::small --json - | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json' - - sccache -s - - rusty-cachier cache upload + # this job is executed in parallel on two runners + - echo "___Running benchmarks___"; + - case ${CI_NODE_INDEX} in + 1) + SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all; + cargo run --release -p node-bench -- ::trie::read::small --json + | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; + echo "___Uploading cache for rusty-cachier___"; + rusty-cachier cache upload + ;; + 2) + cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json + | tee 
./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json + ;; + esac tags: - linux-docker-benches @@ -116,7 +127,7 @@ node-bench-regression-guard: # this is a DAG - job: cargo-check-benches artifacts: true - # this does not like a DAG, just polls the artifact + # polls artifact from master to compare with current result - project: $CI_PROJECT_PATH job: cargo-check-benches ref: master @@ -213,14 +224,27 @@ test-linux-stable: WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" # Ensure we run the UI tests. RUN_UI_TESTS: 1 + # needed for rusty-cachier to keep cache in test-linux-stable folder and not in test-linux-stable-1/3 + CI_JOB_NAME: "test-linux-stable" + parallel: 3 script: - rusty-cachier snapshot create - # TODO: add to paritytech/ci-linux image + # TODO: remove when current paritytech/ci-linux:staging (with rust stable 1.62) is switched to production - time cargo install cargo-nextest # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests + # tests are partitioned by nextest and executed in parallel on $CI_NODE_TOTAL runners # node-cli is excluded until https://github.com/paritytech/substrate/issues/11321 fixed - - time cargo nextest run --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path ./bin/node/cli/Cargo.toml --exclude node-cli - - rusty-cachier cache upload + - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}" + - time cargo nextest run --workspace + --locked + --release + --verbose + --features runtime-benchmarks + --manifest-path ./bin/node/cli/Cargo.toml + --exclude node-cli + --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} + # we need to update cache only from one job + - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi test-frame-support: stage: test @@ -356,10 +380,16 @@ test-wasmer-sandbox: extends: - .docker-env - .test-refs-wasmer-sandbox + variables: + CI_JOB_NAME: "test-wasmer-sandbox" + parallel: 3 script: - rusty-cachier snapshot create - - time cargo test --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests - - rusty-cachier cache upload + # TODO: remove when current paritytech/ci-linux:staging (with rust stable 1.62) is switched to production + - cargo install cargo-nextest + - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}" + - time cargo nextest run --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} + - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi cargo-check-macos: stage: test From 4a19d408095ebcb47840ad1f2f086f53a0020cb7 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 15 Jul 2022 13:55:18 +0200 Subject: [PATCH 041/135] Fixed sync target detection (#11817) * Fixed sync target detection * Always report sync_target metric * Clamp median across all peers --- client/network/sync/src/lib.rs | 9 +++++++-- client/service/src/metrics.rs | 7 ++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index aff773bd12ed6..2f837cd6c4f51 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1719,7 +1719,7 @@ where Ok(sync) } - /// Returns the best seen block. + /// Returns the best seen block number if we don't have that block yet, `None` otherwise. 
fn best_seen(&self) -> Option> { let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); @@ -1729,7 +1729,12 @@ where let middle = best_seens.len() / 2; // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) + let median = *best_seens.select_nth_unstable(middle).1; + if median > self.best_queued_number { + Some(median) + } else { + None + } } } diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 555023f894488..56b145ccdf7f7 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -298,9 +298,10 @@ impl MetricsService { UniqueSaturatedInto::::unique_saturated_into(num) }); - if let Some(best_seen_block) = best_seen_block { - metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); - } + metrics + .block_height + .with_label_values(&["sync_target"]) + .set(best_seen_block.unwrap_or(best_number)); } } } From ddfd02ef2e71f3b80564ae8807aac5a46f6c7134 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sun, 17 Jul 2022 00:24:46 +0300 Subject: [PATCH 042/135] Ignore boot and reserved nodes with peer ID the same as in local node itself (#11851) --- client/network/src/service.rs | 45 +++++++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index ef7ef2f5a2deb..0d28b50df1821 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -148,6 +148,47 @@ where /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. pub fn new(mut params: Params) -> Result { + // Private and public keys configuration. + let local_identity = params.network_config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.to_peer_id(); + + params.network_config.boot_nodes = params + .network_config + .boot_nodes + .into_iter() + .filter(|boot_node| { + if boot_node.peer_id == local_peer_id { + warn!( + target: "sub-libp2p", + "Local peer ID used in bootnode, ignoring: {}", + boot_node, + ); + false + } else { + true + } + }) + .collect(); + params.network_config.default_peers_set.reserved_nodes = params + .network_config + .default_peers_set + .reserved_nodes + .into_iter() + .filter(|reserved_node| { + if reserved_node.peer_id == local_peer_id { + warn!( + target: "sub-libp2p", + "Local peer ID used in reserved node, ignoring: {}", + reserved_node, + ); + false + } else { + true + } + }) + .collect(); + // Ensure the listen addresses are consistent with the transport. ensure_addresses_consistent_with_transport( params.network_config.listen_addresses.iter(), @@ -190,10 +231,6 @@ where .extra_sets .insert(0, transactions_handler_proto.set_config()); - // Private and public keys configuration. 
- let local_identity = params.network_config.node_key.clone().into_keypair()?; - let local_public = local_identity.public(); - let local_peer_id = local_public.to_peer_id(); info!( target: "sub-libp2p", "🏷 Local node identity is: {}", From 6f652840cc78629ac149bf36d23ab34345c65331 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sun, 17 Jul 2022 00:38:38 +0300 Subject: [PATCH 043/135] removed evaluation.rs (#11766) * removed evaluation.rs * no need for parent_hash * fix --- .../basic-authorship/src/basic_authorship.rs | 12 +-- primitives/consensus/common/src/evaluation.rs | 74 ------------------- primitives/consensus/common/src/lib.rs | 1 - 3 files changed, 1 insertion(+), 86 deletions(-) delete mode 100644 primitives/consensus/common/src/evaluation.rs diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index b67008bc6f44b..bc328c40edb3c 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -34,9 +34,7 @@ use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; -use sp_consensus::{ - evaluation, DisableProofRecording, EnableProofRecording, ProofRecording, Proposal, -}; +use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ @@ -205,7 +203,6 @@ where let proposer = Proposer::<_, _, _, _, PR> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), - parent_hash, parent_id: id, parent_number: *parent_header.number(), transaction_pool: self.transaction_pool.clone(), @@ -250,7 +247,6 @@ where pub struct Proposer { spawn_handle: Box, client: Arc, - parent_hash: ::Hash, parent_id: BlockId, parent_number: <::Header as HeaderT>::Number, transaction_pool: Arc, @@ -536,12 +532,6 @@ where "hash" => ?::Hash::from(block.header().hash()), ); - if let Err(err) = - evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) - { - error!("Failed to evaluate authored block: {:?}", err); - } - let proof = PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs deleted file mode 100644 index d1ce8e9fc5109..0000000000000 --- a/primitives/consensus/common/src/evaluation.rs +++ /dev/null @@ -1,74 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Block evaluation and evaluation errors. 
- -use codec::Encode; -use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One}; - -// This is just a best effort to encode the number. None indicated that it's too big to encode -// in a u128. -type BlockNumber = Option; - -/// Result type alias. -pub type Result = std::result::Result; - -/// Error type. -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Failed to verify block encoding/decoding")] - BadBlockCodec, - /// Proposal provided not a block. - #[error("Proposal provided not a block: decoding error: {0}")] - BadProposalFormat(#[from] codec::Error), - /// Proposal had wrong parent hash. - #[error("Proposal had wrong parent hash. Expected {expected:?}, got {got:?}")] - WrongParentHash { expected: String, got: String }, - /// Proposal had wrong number. - #[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")] - WrongNumber { expected: BlockNumber, got: BlockNumber }, -} - -/// Attempt to evaluate a substrate block as a node block, returning error -/// upon any initial validity checks failing. -pub fn evaluate_initial( - proposal: &Block, - parent_hash: &::Hash, - parent_number: <::Header as HeaderT>::Number, -) -> Result<()> { - let encoded = Encode::encode(proposal); - let block = Block::decode(&mut &encoded[..]).map_err(Error::BadProposalFormat)?; - if &block != proposal { - return Err(Error::BadBlockCodec) - } - - if *parent_hash != *block.header().parent_hash() { - return Err(Error::WrongParentHash { - expected: format!("{:?}", *parent_hash), - got: format!("{:?}", block.header().parent_hash()), - }) - } - - if parent_number + One::one() != *block.header().number() { - return Err(Error::WrongNumber { - expected: parent_number.checked_into::().map(|x| x + 1), - got: (*block.header().number()).checked_into::(), - }) - } - - Ok(()) -} diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 2743f434c209b..4539cec2c8e0a 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -33,7 +33,6 @@ use sp_state_machine::StorageProof; pub mod block_validation; pub mod error; -pub mod evaluation; mod select_chain; pub use self::error::Error; From 62a7f2a7db20516673af63884fc83ced4f5d29cf Mon Sep 17 00:00:00 2001 From: lumir-mrkva Date: Sun, 17 Jul 2022 21:28:32 +0200 Subject: [PATCH 044/135] assert noop notifies that storage has been mutated (#11805) --- frame/support/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index f2cd7d1e165ec..8e43df82a284c 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -679,7 +679,7 @@ macro_rules! assert_noop { ) => { let h = $crate::storage_root($crate::StateVersion::V1); $crate::assert_err!($x, $y); - assert_eq!(h, $crate::storage_root($crate::StateVersion::V1)); + assert_eq!(h, $crate::storage_root($crate::StateVersion::V1), "storage has been mutated"); }; } @@ -826,6 +826,7 @@ pub mod tests { pub struct Module for enum Call where origin: T::Origin, system=self {} } } + use self::module::Module; decl_storage! 
{ @@ -851,6 +852,7 @@ pub mod tests { } struct Test; + impl Config for Test { type BlockNumber = u32; type Origin = u32; @@ -867,6 +869,7 @@ pub mod tests { trait Sorted { fn sorted(self) -> Self; } + impl Sorted for Vec { fn sorted(mut self) -> Self { self.sort(); From d2c1c5f36faf0950bd0de467a2d38682eab1dc38 Mon Sep 17 00:00:00 2001 From: yjh Date: Mon, 18 Jul 2022 06:43:04 +0800 Subject: [PATCH 045/135] arrange node-template service (#11795) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make template more clear * fmt check_equivocation Co-authored-by: Bastian Köcher --- bin/node-template/node/src/service.rs | 34 ++++++++++++------------ client/consensus/slots/src/aux_schema.rs | 6 ++--- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index f45f914d94f44..ffb2440caa0ed 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -296,24 +296,24 @@ pub fn new_full(mut config: Configuration) -> Result .spawn_blocking("aura", Some("block-authoring"), aura); } - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - local_role: role, - telemetry: telemetry.as_ref().map(|x| x.handle()), - protocol_name: grandpa_protocol_name, - }; - if enable_grandpa { + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + // start the full GRANDPA voter // NOTE: non-authorities could run the GRANDPA observer protocol, but at // this point the full voter should provide better guarantees of block diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index eeaec68d369d2..c1d01500ffe47 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -89,8 +89,8 @@ where // 1) signed by the same voter, if prev_signer == signer { // 2) with different hash - if header.hash() != prev_header.hash() { - return Ok(Some(EquivocationProof { + return if header.hash() != prev_header.hash() { + Ok(Some(EquivocationProof { slot, offender: signer.clone(), first_header: prev_header.clone(), @@ -100,7 +100,7 @@ where // We don't need to continue in case of duplicated header, // since it's already saved and a possible equivocation // would have been detected before. 
- return Ok(None) + Ok(None) } } } From 64b0daa64d983738229af08f130f20aafc371a17 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 18 Jul 2022 00:57:13 +0200 Subject: [PATCH 046/135] Expose and link trie migration weights (#11775) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * expose and link trie migration weights * actually limit to one trait * Run test on dummy weights * fmt Co-authored-by: Bastian Köcher --- frame/state-trie-migration/src/lib.rs | 69 +++++++++++++-------------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 9ac128a0e3108..94f6c1f223b9c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -56,6 +56,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub use pallet::*; +pub mod weights; const LOG_TARGET: &str = "runtime::state-trie-migration"; @@ -71,6 +72,9 @@ macro_rules! log { #[frame_support::pallet] pub mod pallet { + + pub use crate::weights::WeightInfo; + use frame_support::{ dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, ensure, @@ -90,41 +94,6 @@ pub mod pallet { pub(crate) type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - /// The weight information of this pallet. - pub trait WeightInfo { - fn process_top_key(x: u32) -> Weight; - fn continue_migrate() -> Weight; - fn continue_migrate_wrong_witness() -> Weight; - fn migrate_custom_top_fail() -> Weight; - fn migrate_custom_top_success() -> Weight; - fn migrate_custom_child_fail() -> Weight; - fn migrate_custom_child_success() -> Weight; - } - - impl WeightInfo for () { - fn process_top_key(_: u32) -> Weight { - 1000000 - } - fn continue_migrate() -> Weight { - 1000000 - } - fn continue_migrate_wrong_witness() -> Weight { - 1000000 - } - fn migrate_custom_top_fail() -> Weight { - 1000000 - } - fn migrate_custom_top_success() -> Weight { - 1000000 - } - fn migrate_custom_child_fail() -> Weight { - 1000000 - } - fn migrate_custom_child_success() -> Weight { - 1000000 - } - } - /// The progress of either the top or child keys. #[derive( CloneNoBound, @@ -1072,6 +1041,7 @@ mod mock { use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, Hooks}, + weights::Weight, }; use frame_system::{EnsureRoot, EnsureSigned}; use sp_core::{ @@ -1148,6 +1118,33 @@ mod mock { type WeightInfo = (); } + /// Test only Weights for state migration. 
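+	/// (Each method below returns a flat weight of 1_000_000.)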
+ pub struct StateMigrationTestWeight; + + impl WeightInfo for StateMigrationTestWeight { + fn process_top_key(_: u32) -> Weight { + 1000000 + } + fn continue_migrate() -> Weight { + 1000000 + } + fn continue_migrate_wrong_witness() -> Weight { + 1000000 + } + fn migrate_custom_top_fail() -> Weight { + 1000000 + } + fn migrate_custom_top_success() -> Weight { + 1000000 + } + fn migrate_custom_child_fail() -> Weight { + 1000000 + } + fn migrate_custom_child_success() -> Weight { + 1000000 + } + } + impl pallet_state_trie_migration::Config for Test { type Event = Event; type ControlOrigin = EnsureRoot; @@ -1156,7 +1153,7 @@ mod mock { type SignedDepositPerItem = SignedDepositPerItem; type SignedDepositBase = SignedDepositBase; type SignedFilter = EnsureSigned; - type WeightInfo = (); + type WeightInfo = StateMigrationTestWeight; } pub fn new_test_ext( From 8016da9ddf1c10a5a74cc5a273905cfeb60ed82c Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 18 Jul 2022 20:51:38 +0900 Subject: [PATCH 047/135] Improve `wasmtime` error reporting (#11856) * Improve `wasmtime` error reporting * cargo fmt --- client/executor/wasmtime/src/imports.rs | 2 +- .../executor/wasmtime/src/instance_wrapper.rs | 7 +++--- client/executor/wasmtime/src/runtime.rs | 24 +++++++++---------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index fae8d75854ef3..c80952a2541ce 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -112,7 +112,7 @@ impl<'a, 'b> sp_wasm_interface::HostFunctionRegistry for Registry<'a, 'b> { if self.pending_func_imports.remove(fn_name).is_some() { self.linker.func_wrap("env", fn_name, func).map_err(|error| { WasmError::Other(format!( - "failed to register host function '{}' with the WASM linker: {}", + "failed to register host function '{}' with the WASM linker: {:#}", fn_name, error )) })?; diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 6abcbca1bba6f..5d272accd3524 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -186,9 +186,10 @@ impl InstanceWrapper { ) -> Result { let mut store = create_store(engine, max_memory_size); let instance = instance_pre.instantiate(&mut store).map_err(|error| { - WasmError::Other( - format!("failed to instantiate a new WASM module instance: {}", error,), - ) + WasmError::Other(format!( + "failed to instantiate a new WASM module instance: {:#}", + error, + )) })?; let memory = get_linear_memory(&instance, &mut store)?; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index c63a802df07cc..fb5c6cb16993a 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -257,12 +257,12 @@ fn setup_wasmtime_caching( let wasmtime_cache_root = cache_path.join("wasmtime"); fs::create_dir_all(&wasmtime_cache_root) - .map_err(|err| format!("cannot create the dirs to cache: {:?}", err))?; + .map_err(|err| format!("cannot create the dirs to cache: {}", err))?; // Canonicalize the path after creating the directories. 
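	// (`canonicalize` can only resolve paths that already exist, hence the `create_dir_all` above.)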
let wasmtime_cache_root = wasmtime_cache_root .canonicalize() - .map_err(|err| format!("failed to canonicalize the path: {:?}", err))?; + .map_err(|err| format!("failed to canonicalize the path: {}", err))?; // Write the cache config file let cache_config_path = wasmtime_cache_root.join("cache-config.toml"); @@ -275,11 +275,11 @@ directory = \"{cache_dir}\" cache_dir = wasmtime_cache_root.display() ); fs::write(&cache_config_path, config_content) - .map_err(|err| format!("cannot write the cache config: {:?}", err))?; + .map_err(|err| format!("cannot write the cache config: {}", err))?; config .cache_config_load(cache_config_path) - .map_err(|err| format!("failed to parse the config: {:?}", err))?; + .map_err(|err| format!("failed to parse the config: {:#}", err))?; Ok(()) } @@ -304,14 +304,14 @@ fn common_config(semantics: &Semantics) -> std::result::Result { @@ -626,7 +626,7 @@ where let serialized_blob = blob.clone().serialize(); let module = wasmtime::Module::new(&engine, &serialized_blob) - .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot create module: {:#}", e)))?; match config.semantics.instantiation_strategy { InstantiationStrategy::LegacyInstanceReuse => { @@ -664,7 +664,7 @@ where // // See [`create_runtime_from_artifact`] for more details. let module = wasmtime::Module::deserialize_file(&engine, compiled_artifact_path) - .map_err(|e| WasmError::Other(format!("cannot deserialize module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:#}", e)))?; (module, InternalInstantiationStrategy::Builtin) }, @@ -677,7 +677,7 @@ where crate::instance_wrapper::create_store(module.engine(), config.semantics.max_memory_size); let instance_pre = linker .instantiate_pre(&mut store, &module) - .map_err(|e| WasmError::Other(format!("cannot preinstantiate module: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot preinstantiate module: {:#}", e)))?; Ok(WasmtimeRuntime { engine, @@ -729,11 +729,11 @@ pub fn prepare_runtime_artifact( let blob = prepare_blob_for_compilation(blob, &semantics)?; let engine = Engine::new(&common_config(&semantics)?) 
- .map_err(|e| WasmError::Other(format!("cannot create the engine: {}", e)))?; + .map_err(|e| WasmError::Other(format!("cannot create the engine: {:#}", e)))?; engine .precompile_module(&blob.serialize()) - .map_err(|e| WasmError::Other(format!("cannot precompile module: {}", e))) + .map_err(|e| WasmError::Other(format!("cannot precompile module: {:#}", e))) } fn perform_call( From 32bc42ba64f88a8ea65734ba9a404779fafbc0df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20St=C3=BCber?= Date: Mon, 18 Jul 2022 14:56:22 +0200 Subject: [PATCH 048/135] Fix invalid state transitions in pallet-bounties (#11630) * Pallet-bounty: disallow invalid state transitions * Fix tests: funding only at even block numbers * Fix benchmarks: bounties need to be funded * fix on_initialize Co-authored-by: Shawn Tabrizi --- frame/bounties/src/lib.rs | 2 +- frame/bounties/src/tests.rs | 8 ++++---- frame/child-bounties/src/benchmarking.rs | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 8e28cacb9d929..fca758fd96b8e 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -390,7 +390,7 @@ pub mod pallet { Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; match bounty.status { - BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, + BountyStatus::Funded => {}, _ => return Err(Error::::UnexpectedStatus.into()), }; diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index ff220600794d4..b4ce039b35fbc 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -1126,8 +1126,8 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); - System::set_block_number(3); - >::on_initialize(3); + System::set_block_number(4); + >::on_initialize(4); assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); @@ -1150,8 +1150,8 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); - System::set_block_number(3); - >::on_initialize(3); + System::set_block_number(6); + >::on_initialize(6); assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index dcb54361fac89..ca5af50276b9d 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -221,7 +221,7 @@ benchmarks! { unassign_curator { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Bounties::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(T::BlockNumber::zero()); frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_setup.bounty_id, @@ -295,7 +295,7 @@ benchmarks! 
{ close_child_bounty_active { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Bounties::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(T::BlockNumber::zero()); }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id) verify { assert_last_event::(Event::Canceled { From a434d07ab18982eceff528c6e7f1e95a6f71bea8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 18 Jul 2022 21:25:18 +0200 Subject: [PATCH 049/135] Disable the interest cache (#11854) * Disable the interest cache The feature is currently broken with the latest `tracing-core`. We will enable it again, when it is fixed upstream. * FMT --- Cargo.lock | 8 +++----- client/tracing/Cargo.toml | 2 +- client/tracing/src/logging/mod.rs | 5 +---- primitives/io/Cargo.toml | 2 +- primitives/runtime-interface/test/Cargo.toml | 2 +- primitives/tracing/Cargo.toml | 2 +- 6 files changed, 8 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 791d8037f3335..279864e056b0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11066,11 +11066,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -11090,10 +11090,8 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ - "ahash", "lazy_static", "log", - "lru", "tracing-core", ] diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 3f0bbbe922d3f..476e03ee741f3 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -26,7 +26,7 @@ rustc-hash = "1.1.0" serde = "1.0.136" thiserror = "1.0.30" tracing = "0.1.29" -tracing-log = { version = "0.1.3", features = ["interest-cache"] } +tracing-log = "0.1.3" tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 33c83dd87189e..58941617bfb6a 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -155,10 +155,7 @@ where let max_level_hint = Layer::::max_level_hint(&env_filter); let max_level = to_log_level_filter(max_level_hint); - tracing_log::LogTracer::builder() - .with_max_level(max_level) - .with_interest_cache(tracing_log::InterestCacheConfig::default()) - .init()?; + tracing_log::LogTracer::builder().with_max_level(max_level).init()?; // If we're only logging `INFO` entries then we'll use a simplified logging format. 
let detailed_output = match max_level_hint {
diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml
index 09a087d509416..03cd8535665ac 100644
--- a/primitives/io/Cargo.toml
+++ b/primitives/io/Cargo.toml
@@ -32,7 +32,7 @@ futures = { version = "0.3.21", features = ["thread-pool"], optional = true }
 parking_lot = { version = "0.12.0", optional = true }
 secp256k1 = { version = "0.21.2", features = ["recovery", "global-context"], optional = true }
 tracing = { version = "0.1.29", default-features = false }
-tracing-core = { version = "0.1.26", default-features = false}
+tracing-core = { version = "0.1.28", default-features = false}

 [features]
 default = ["std"]
diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml
index e897fc0bab71c..880d03902b421 100644
--- a/primitives/runtime-interface/test/Cargo.toml
+++ b/primitives/runtime-interface/test/Cargo.toml
@@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 tracing = "0.1.29"
-tracing-core = "0.1.26"
+tracing-core = "0.1.28"
 sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" }
 sc-executor-common = { version = "0.10.0-dev", path = "../../../client/executor/common" }
 sp-io = { version = "6.0.0", path = "../../io" }
diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml
index d305b756e2d68..c2ca57d2b5a43 100644
--- a/primitives/tracing/Cargo.toml
+++ b/primitives/tracing/Cargo.toml
@@ -23,7 +23,7 @@ codec = { version = "3.0.0", package = "parity-scale-codec", default-features =
 	"derive",
 ] }
 tracing = { version = "0.1.29", default-features = false }
-tracing-core = { version = "0.1.26", default-features = false }
+tracing-core = { version = "0.1.28", default-features = false }
 tracing-subscriber = { version = "0.2.25", optional = true, features = [
 	"tracing-log",
 ] }

From ced41695aafa648ebdb01305e563fa7ca89756bc Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Tue, 19 Jul 2022 08:10:15 +0200
Subject: [PATCH 050/135] Add `benchmark extrinsic` command (#11456)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Benchmark extrinsic

Signed-off-by: Oliver Tale-Yazdi

* Reduce warmup and repeat

Running this 1000 times with a full block takes ~33 minutes 🙈.
Reducing it to ~3 minutes by default.

Signed-off-by: Oliver Tale-Yazdi

* Make ExistentialDeposit public

Signed-off-by: Oliver Tale-Yazdi

* Add 'benchmark extrinsic' command

Signed-off-by: Oliver Tale-Yazdi

* fmt

Signed-off-by: Oliver Tale-Yazdi

* Add --list and cleanup

Signed-off-by: Oliver Tale-Yazdi

* Unrelated Clippy

Signed-off-by: Oliver Tale-Yazdi

* Clippy

Signed-off-by: Oliver Tale-Yazdi

* Fix tests and doc

Signed-off-by: Oliver Tale-Yazdi

* Move implementations up + fmt

Signed-off-by: Oliver Tale-Yazdi

* Don't use parameter_types macro

Signed-off-by: Oliver Tale-Yazdi

* Cache to_lowercase() call

The .to_lowercase() on the builder is actually not needed since it's
already documented to only return lower case.

Signed-off-by: Oliver Tale-Yazdi

* Spelling

Signed-off-by: Oliver Tale-Yazdi

* Use correct nightly for fmt...

Signed-off-by: Oliver Tale-Yazdi

* Rename ED

Signed-off-by: Oliver Tale-Yazdi

* Fix compile

Signed-off-by: Oliver Tale-Yazdi

* Subtract block base weight

Signed-off-by: Oliver Tale-Yazdi

* Fixes

Signed-off-by: Oliver Tale-Yazdi

* Use tmp folder for test

This should already be the case since --dev is passed but somehow not... 
Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi --- .../{command_helper.rs => benchmarking.rs} | 66 ++++++++- bin/node-template/node/src/command.rs | 26 +++- bin/node-template/node/src/main.rs | 2 +- bin/node-template/runtime/src/lib.rs | 5 +- .../{command_helper.rs => benchmarking.rs} | 70 +++++++-- bin/node/cli/src/command.rs | 23 ++- bin/node/cli/src/lib.rs | 4 +- .../cli/tests/benchmark_extrinsic_works.rs | 46 ++++++ .../src/{overhead => extrinsic}/bench.rs | 107 +++++++------- .../benchmarking-cli/src/extrinsic/cmd.rs | 134 ++++++++++++++++++ .../src/extrinsic/extrinsic_factory.rs | 70 +++++++++ .../benchmarking-cli/src/extrinsic/mod.rs | 27 ++++ utils/frame/benchmarking-cli/src/lib.rs | 7 +- .../benchmarking-cli/src/overhead/cmd.rs | 52 +++++-- .../benchmarking-cli/src/overhead/mod.rs | 5 +- .../benchmarking-cli/src/overhead/template.rs | 2 +- 16 files changed, 537 insertions(+), 109 deletions(-) rename bin/node-template/node/src/{command_helper.rs => benchmarking.rs} (71%) rename bin/node/cli/src/{command_helper.rs => benchmarking.rs} (52%) create mode 100644 bin/node/cli/tests/benchmark_extrinsic_works.rs rename utils/frame/benchmarking-cli/src/{overhead => extrinsic}/bench.rs (70%) create mode 100644 utils/frame/benchmarking-cli/src/extrinsic/cmd.rs create mode 100644 utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs create mode 100644 utils/frame/benchmarking-cli/src/extrinsic/mod.rs diff --git a/bin/node-template/node/src/command_helper.rs b/bin/node-template/node/src/benchmarking.rs similarity index 71% rename from bin/node-template/node/src/command_helper.rs rename to bin/node-template/node/src/benchmarking.rs index 287e81b1e96bd..f0e32104cd3ee 100644 --- a/bin/node-template/node/src/command_helper.rs +++ b/bin/node-template/node/src/benchmarking.rs @@ -16,13 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains code to setup the command invocations in [`super::command`] which would -//! otherwise bloat that module. +//! Setup code for [`super::command`] which would otherwise bloat that module. +//! +//! Should only be used for benchmarking as it may break in other contexts. use crate::service::FullClient; use node_template_runtime as runtime; -use runtime::SystemCall; +use runtime::{AccountId, Balance, BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; use sp_core::{Encode, Pair}; @@ -35,19 +36,27 @@ use std::{sync::Arc, time::Duration}; /// Generates extrinsics for the `benchmark overhead` command. /// /// Note: Should only be used for benchmarking. -pub struct BenchmarkExtrinsicBuilder { +pub struct RemarkBuilder { client: Arc, } -impl BenchmarkExtrinsicBuilder { +impl RemarkBuilder { /// Creates a new [`Self`] from the given client. pub fn new(client: Arc) -> Self { Self { client } } } -impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { - fn remark(&self, nonce: u32) -> std::result::Result { +impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { + fn pallet(&self) -> &str { + "system" + } + + fn extrinsic(&self) -> &str { + "remark" + } + + fn build(&self, nonce: u32) -> std::result::Result { let acc = Sr25519Keyring::Bob.pair(); let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic( self.client.as_ref(), @@ -61,6 +70,49 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { } } +/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks. 
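+/// Each built extrinsic transfers `value` from the `Bob` dev account to `dest`, using an increasing nonce.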
+/// +/// Note: Should only be used for benchmarking. +pub struct TransferKeepAliveBuilder { + client: Arc, + dest: AccountId, + value: Balance, +} + +impl TransferKeepAliveBuilder { + /// Creates a new [`Self`] from the given client. + pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { + Self { client, dest, value } + } +} + +impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { + fn pallet(&self) -> &str { + "balances" + } + + fn extrinsic(&self) -> &str { + "transfer_keep_alive" + } + + fn build(&self, nonce: u32) -> std::result::Result { + let acc = Sr25519Keyring::Bob.pair(); + let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic( + self.client.as_ref(), + acc, + BalancesCall::transfer_keep_alive { + dest: self.dest.clone().into(), + value: self.value.into(), + } + .into(), + nonce, + ) + .into(); + + Ok(extrinsic) + } +} + /// Create a transaction using the given `call`. /// /// Note: Should only be used for benchmarking. diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e3e10007929e6..142f0b40c325e 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -1,14 +1,14 @@ use crate::{ + benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}, chain_spec, cli::{Cli, Subcommand}, - command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder}, service, }; -use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; -use node_template_runtime::Block; +use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; +use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; -use std::sync::Arc; +use sp_keyring::Sr25519Keyring; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -137,9 +137,23 @@ pub fn run() -> sc_cli::Result<()> { }, BenchmarkCmd::Overhead(cmd) => { let PartialComponents { client, .. } = service::new_partial(&config)?; - let ext_builder = BenchmarkExtrinsicBuilder::new(client.clone()); + let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder)) + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + }, + BenchmarkCmd::Extrinsic(cmd) => { + let PartialComponents { client, .. } = service::new_partial(&config)?; + // Register the *Remark* and *TKA* builders. 
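+			// ("TKA" is short for the `transfer_keep_alive` extrinsic.)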
+ let ext_factory = ExtrinsicFactory(vec![ + Box::new(RemarkBuilder::new(client.clone())), + Box::new(TransferKeepAliveBuilder::new( + client.clone(), + Sr25519Keyring::Alice.to_account_id(), + EXISTENTIAL_DEPOSIT, + )), + ]); + + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 0f2fbd5a909c6..426cbabb6fbf7 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -4,9 +4,9 @@ mod chain_spec; #[macro_use] mod service; +mod benchmarking; mod cli; mod command; -mod command_helper; mod rpc; fn main() -> sc_cli::Result<()> { diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c514cdf6c25fd..a2f595f571a90 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -234,6 +234,9 @@ impl pallet_timestamp::Config for Runtime { type WeightInfo = (); } +/// Existential deposit. +pub const EXISTENTIAL_DEPOSIT: u128 = 500; + impl pallet_balances::Config for Runtime { type MaxLocks = ConstU32<50>; type MaxReserves = (); @@ -243,7 +246,7 @@ impl pallet_balances::Config for Runtime { /// The ubiquitous event type. type Event = Event; type DustRemoval = (); - type ExistentialDeposit = ConstU128<500>; + type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; } diff --git a/bin/node/cli/src/command_helper.rs b/bin/node/cli/src/benchmarking.rs similarity index 52% rename from bin/node/cli/src/command_helper.rs rename to bin/node/cli/src/benchmarking.rs index 84d85ee367cab..52657016c046a 100644 --- a/bin/node/cli/src/command_helper.rs +++ b/bin/node/cli/src/benchmarking.rs @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains code to setup the command invocations in [`super::command`] which would -//! otherwise bloat that module. +//! Setup code for [`super::command`] which would otherwise bloat that module. +//! +//! Should only be used for benchmarking as it may break in other contexts. use crate::service::{create_extrinsic, FullClient}; -use node_runtime::SystemCall; +use node_primitives::{AccountId, Balance}; +use node_runtime::{BalancesCall, SystemCall}; use sc_cli::Result; use sp_inherents::{InherentData, InherentDataProvider}; use sp_keyring::Sr25519Keyring; @@ -29,20 +31,30 @@ use sp_runtime::OpaqueExtrinsic; use std::{sync::Arc, time::Duration}; -/// Generates extrinsics for the `benchmark overhead` command. -pub struct BenchmarkExtrinsicBuilder { +/// Generates `System::Remark` extrinsics for the benchmarks. +/// +/// Note: Should only be used for benchmarking. +pub struct RemarkBuilder { client: Arc, } -impl BenchmarkExtrinsicBuilder { +impl RemarkBuilder { /// Creates a new [`Self`] from the given client. 
pub fn new(client: Arc) -> Self { Self { client } } } -impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { - fn remark(&self, nonce: u32) -> std::result::Result { +impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { + fn pallet(&self) -> &str { + "system" + } + + fn extrinsic(&self) -> &str { + "remark" + } + + fn build(&self, nonce: u32) -> std::result::Result { let acc = Sr25519Keyring::Bob.pair(); let extrinsic: OpaqueExtrinsic = create_extrinsic( self.client.as_ref(), @@ -56,6 +68,48 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder { } } +/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks. +/// +/// Note: Should only be used for benchmarking. +pub struct TransferKeepAliveBuilder { + client: Arc, + dest: AccountId, + value: Balance, +} + +impl TransferKeepAliveBuilder { + /// Creates a new [`Self`] from the given client. + pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { + Self { client, dest, value } + } +} + +impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { + fn pallet(&self) -> &str { + "balances" + } + + fn extrinsic(&self) -> &str { + "transfer_keep_alive" + } + + fn build(&self, nonce: u32) -> std::result::Result { + let acc = Sr25519Keyring::Bob.pair(); + let extrinsic: OpaqueExtrinsic = create_extrinsic( + self.client.as_ref(), + acc, + BalancesCall::transfer_keep_alive { + dest: self.dest.clone().into(), + value: self.value.into(), + }, + Some(nonce), + ) + .into(); + + Ok(extrinsic) + } +} + /// Generates inherent data for the `benchmark overhead` command. pub fn inherent_benchmark_data() -> Result { let mut inherent_data = InherentData::new(); diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b17a26fa02935..df3d5a891e2ab 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder}; +use super::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}; use crate::{ chain_spec, service, service::{new_partial, FullClient}, @@ -25,9 +25,10 @@ use crate::{ use frame_benchmarking_cli::*; use node_executor::ExecutorDispatch; use node_primitives::Block; -use node_runtime::RuntimeApi; +use node_runtime::{ExistentialDeposit, RuntimeApi}; use sc_cli::{ChainSpec, Result, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; +use sp_keyring::Sr25519Keyring; use std::sync::Arc; @@ -126,9 +127,23 @@ pub fn run() -> Result<()> { }, BenchmarkCmd::Overhead(cmd) => { let PartialComponents { client, .. } = new_partial(&config)?; - let ext_builder = BenchmarkExtrinsicBuilder::new(client.clone()); + let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder)) + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + }, + BenchmarkCmd::Extrinsic(cmd) => { + let PartialComponents { client, .. } = service::new_partial(&config)?; + // Register the *Remark* and *TKA* builders. 
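+			// (Same builders as in the node-template; here the transfer value is the runtime's `ExistentialDeposit`.)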
+ let ext_factory = ExtrinsicFactory(vec![ + Box::new(RemarkBuilder::new(client.clone())), + Box::new(TransferKeepAliveBuilder::new( + client.clone(), + Sr25519Keyring::Alice.to_account_id(), + ExistentialDeposit::get(), + )), + ]); + + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index 06c0bcccbc296..13c074268e50f 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -35,11 +35,11 @@ pub mod chain_spec; #[macro_use] pub mod service; #[cfg(feature = "cli")] +mod benchmarking; +#[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; -#[cfg(feature = "cli")] -mod command_helper; #[cfg(feature = "cli")] pub use cli::*; diff --git a/bin/node/cli/tests/benchmark_extrinsic_works.rs b/bin/node/cli/tests/benchmark_extrinsic_works.rs new file mode 100644 index 0000000000000..69800ad3c6c13 --- /dev/null +++ b/bin/node/cli/tests/benchmark_extrinsic_works.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use assert_cmd::cargo::cargo_bin; +use std::process::Command; +use tempfile::tempdir; + +/// Tests that the `benchmark extrinsic` command works for +/// remark and transfer_keep_alive within the substrate dev runtime. +#[test] +fn benchmark_extrinsic_works() { + benchmark_extrinsic("system", "remark"); + benchmark_extrinsic("balances", "transfer_keep_alive"); +} + +/// Checks that the `benchmark extrinsic` command works for the given pallet and extrinsic. +fn benchmark_extrinsic(pallet: &str, extrinsic: &str) { + let base_dir = tempdir().expect("could not create a temp dir"); + + let status = Command::new(cargo_bin("substrate")) + .args(&["benchmark", "extrinsic", "--dev"]) + .arg("-d") + .arg(base_dir.path()) + .args(&["--pallet", pallet, "--extrinsic", extrinsic]) + // Run with low repeats for faster execution. + .args(["--warmup=10", "--repeat=10", "--max-ext-per-block=10"]) + .status() + .unwrap(); + + assert!(status.success()); +} diff --git a/utils/frame/benchmarking-cli/src/overhead/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs similarity index 70% rename from utils/frame/benchmarking-cli/src/overhead/bench.rs rename to utils/frame/benchmarking-cli/src/extrinsic/bench.rs index be7dac24021cf..27fc40fb34774 100644 --- a/utils/frame/benchmarking-cli/src/overhead/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -36,8 +36,8 @@ use log::info; use serde::Serialize; use std::{marker::PhantomData, sync::Arc, time::Instant}; -use super::cmd::ExtrinsicBuilder; -use crate::shared::Stats; +use super::ExtrinsicBuilder; +use crate::shared::{StatSelect, Stats}; /// Parameters to configure an *overhead* benchmark. 
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] @@ -60,21 +60,11 @@ pub struct BenchmarkParams { /// The results of multiple runs in nano seconds. pub(crate) type BenchRecord = Vec; -/// Type of a benchmark. -#[derive(Serialize, Clone, PartialEq, Copy)] -pub(crate) enum BenchmarkType { - /// Measure the per-extrinsic execution overhead. - Extrinsic, - /// Measure the per-block execution overhead. - Block, -} - /// Holds all objects needed to run the *overhead* benchmarks. pub(crate) struct Benchmark { client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, - ext_builder: Arc, _p: PhantomData<(Block, BA)>, } @@ -90,23 +80,51 @@ where client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, - ext_builder: Arc, ) -> Self { - Self { client, params, inherent_data, ext_builder, _p: PhantomData } + Self { client, params, inherent_data, _p: PhantomData } } - /// Run the specified benchmark. - pub fn bench(&self, bench_type: BenchmarkType) -> Result { - let (block, num_ext) = self.build_block(bench_type)?; - let record = self.measure_block(&block, num_ext, bench_type)?; + /// Benchmark a block with only inherents. + pub fn bench_block(&self) -> Result { + let (block, _) = self.build_block(None)?; + let record = self.measure_block(&block)?; Stats::new(&record) } - /// Builds a block for the given benchmark type. + /// Benchmark the time of an extrinsic in a full block. + /// + /// First benchmarks an empty block, analogous to `bench_block` and use it as baseline. + /// Then benchmarks a full block built with the given `ext_builder` and subtracts the baseline + /// from the result. + /// This is necessary to account for the time the inherents use. + pub fn bench_extrinsic(&self, ext_builder: &dyn ExtrinsicBuilder) -> Result { + let (block, _) = self.build_block(None)?; + let base = self.measure_block(&block)?; + let base_time = Stats::new(&base)?.select(StatSelect::Average); + + let (block, num_ext) = self.build_block(Some(ext_builder))?; + let num_ext = num_ext.ok_or_else(|| Error::Input("Block was empty".into()))?; + let mut records = self.measure_block(&block)?; + + for r in &mut records { + // Subtract the base time. + *r = r.saturating_sub(base_time); + // Divide by the number of extrinsics in the block. + *r = ((*r as f64) / (num_ext as f64)).ceil() as u64; + } + + Stats::new(&records) + } + + /// Builds a block with some optional extrinsics. /// /// Returns the block and the number of extrinsics in the block /// that are not inherents. - fn build_block(&self, bench_type: BenchmarkType) -> Result<(Block, u64)> { + /// Returns a block with only inherents if `ext_builder` is `None`. + fn build_block( + &self, + ext_builder: Option<&dyn ExtrinsicBuilder>, + ) -> Result<(Block, Option)> { let mut builder = self.client.new_block(Default::default())?; // Create and insert the inherents. let inherents = builder.create_inherents(self.inherent_data.clone())?; @@ -114,16 +132,18 @@ where builder.push(inherent)?; } - // Return early if we just want a block with inherents and no additional extrinsics. - if bench_type == BenchmarkType::Block { - return Ok((builder.build()?.block, 0)) - } + // Return early if `ext_builder` is `None`. + let ext_builder = if let Some(ext_builder) = ext_builder { + ext_builder + } else { + return Ok((builder.build()?.block, None)) + }; // Put as many extrinsics into the block as possible and count them. 
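		// (Filling stops at `max_ext_per_block` or on the first `ExhaustsResources` error.)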
info!("Building block, this takes some time..."); let mut num_ext = 0; for nonce in 0..self.max_ext_per_block() { - let ext = self.ext_builder.remark(nonce)?; + let ext = ext_builder.build(nonce)?; match builder.push(ext.clone()) { Ok(()) => {}, Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid( @@ -139,20 +159,12 @@ where info!("Extrinsics per block: {}", num_ext); let block = builder.build()?.block; - Ok((block, num_ext)) + Ok((block, Some(num_ext))) } /// Measures the time that it take to execute a block or an extrinsic. - fn measure_block( - &self, - block: &Block, - num_ext: u64, - bench_type: BenchmarkType, - ) -> Result { + fn measure_block(&self, block: &Block) -> Result { let mut record = BenchRecord::new(); - if bench_type == BenchmarkType::Extrinsic && num_ext == 0 { - return Err("Cannot measure the extrinsic time of an empty block".into()) - } let genesis = BlockId::Number(Zero::zero()); info!("Running {} warmups...", self.params.warmup); @@ -176,12 +188,7 @@ where .map_err(|e| Error::Client(RuntimeApiError(e)))?; let elapsed = start.elapsed().as_nanos(); - if bench_type == BenchmarkType::Extrinsic { - // Checked for non-zero div above. - record.push((elapsed as f64 / num_ext as f64).ceil() as u64); - } else { - record.push(elapsed as u64); - } + record.push(elapsed as u64); } Ok(record) @@ -191,21 +198,3 @@ where self.params.max_ext_per_block.unwrap_or(u32::MAX) } } - -impl BenchmarkType { - /// Short name of the benchmark type. - pub(crate) fn short_name(&self) -> &'static str { - match self { - Self::Extrinsic => "extrinsic", - Self::Block => "block", - } - } - - /// Long name of the benchmark type. - pub(crate) fn long_name(&self) -> &'static str { - match self { - Self::Extrinsic => "ExtrinsicBase", - Self::Block => "BlockExecution", - } - } -} diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs new file mode 100644 index 0000000000000..6b4fd0fad6638 --- /dev/null +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; +use sc_client_api::Backend as ClientBackend; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; + +use clap::{Args, Parser}; +use log::info; +use serde::Serialize; +use std::{fmt::Debug, sync::Arc}; + +use super::{ + bench::{Benchmark, BenchmarkParams}, + extrinsic_factory::ExtrinsicFactory, +}; + +/// Benchmark the execution time of different extrinsics. +/// +/// This is calculated by filling a block with a specific extrinsic and executing the block. +/// The result time is then divided by the number of extrinsics in that block. 
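+/// An empty block is measured first and used as a baseline that is subtracted,
+/// so that the time spent on inherents is excluded.
+///
+/// Example invocation (mirroring the `benchmark_extrinsic_works` test):
+/// `substrate benchmark extrinsic --dev --pallet balances --extrinsic transfer_keep_alive`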
+/// +/// NOTE: The BlockExecutionWeight is ignored in this case since it +// is very small compared to the total block execution time. +#[derive(Debug, Parser)] +pub struct ExtrinsicCmd { + #[allow(missing_docs)] + #[clap(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub import_params: ImportParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub params: ExtrinsicParams, +} + +/// The params for the [`ExtrinsicCmd`]. +#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] +pub struct ExtrinsicParams { + #[clap(flatten)] + pub bench: BenchmarkParams, + + /// List all available pallets and extrinsics. + /// + /// The format is CSV with header `pallet, extrinsic`. + #[clap(long)] + pub list: bool, + + /// Pallet name of the extrinsic to benchmark. + #[clap(long, value_name = "PALLET", required_unless_present = "list")] + pub pallet: Option, + + /// Extrinsic to benchmark. + #[clap(long, value_name = "EXTRINSIC", required_unless_present = "list")] + pub extrinsic: Option, +} + +impl ExtrinsicCmd { + /// Benchmark the execution time of a specific type of extrinsic. + /// + /// The output will be printed to console. + pub fn run( + &self, + client: Arc, + inherent_data: sp_inherents::InherentData, + ext_factory: &ExtrinsicFactory, + ) -> Result<()> + where + Block: BlockT, + BA: ClientBackend, + C: BlockBuilderProvider + ProvideRuntimeApi, + C::Api: ApiExt + BlockBuilderApi, + { + // Short circuit if --list was specified. + if self.params.list { + let list: Vec = ext_factory.0.iter().map(|b| b.name()).collect(); + info!( + "Listing available extrinsics ({}):\npallet, extrinsic\n{}", + list.len(), + list.join("\n") + ); + return Ok(()) + } + + let pallet = self.params.pallet.clone().unwrap_or_default(); + let extrinsic = self.params.extrinsic.clone().unwrap_or_default(); + let ext_builder = match ext_factory.try_get(&pallet, &extrinsic) { + Some(ext_builder) => ext_builder, + None => + return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()), + }; + + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); + let stats = bench.bench_extrinsic(ext_builder)?; + info!( + "Executing a {}::{} extrinsic takes[ns]:\n{:?}", + ext_builder.pallet(), + ext_builder.extrinsic(), + stats + ); + + Ok(()) + } +} + +// Boilerplate +impl CliConfiguration for ExtrinsicCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } + + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } +} diff --git a/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs b/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs new file mode 100644 index 0000000000000..7e1a22ccd1419 --- /dev/null +++ b/utils/frame/benchmarking-cli/src/extrinsic/extrinsic_factory.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//! Provides the [`ExtrinsicFactory`] and the [`ExtrinsicBuilder`] types. +//! Is used by the *overhead* and *extrinsic* benchmarks to build extrinsics. + +use sp_runtime::OpaqueExtrinsic; + +/// Helper to manage [`ExtrinsicBuilder`] instances. +#[derive(Default)] +pub struct ExtrinsicFactory(pub Vec>); + +impl ExtrinsicFactory { + /// Returns a builder for a pallet and extrinsic name. + /// + /// Is case-insensitive. + pub fn try_get(&self, pallet: &str, extrinsic: &str) -> Option<&dyn ExtrinsicBuilder> { + let pallet = pallet.to_lowercase(); + let extrinsic = extrinsic.to_lowercase(); + + self.0 + .iter() + .find(|b| b.pallet() == pallet && b.extrinsic() == extrinsic) + .map(|b| b.as_ref()) + } +} + +/// Used by the benchmark to build signed extrinsics. +/// +/// The built extrinsics only need to be valid in the first block +/// whose parent block is the genesis block. +/// This assumption simplifies the generation of the extrinsics. +/// The signer should be one of the pre-funded dev accounts. +pub trait ExtrinsicBuilder { + /// Name of the pallet this builder is for. + /// + /// Should be all lowercase. + fn pallet(&self) -> &str; + + /// Name of the extrinsic this builder is for. + /// + /// Should be all lowercase. + fn extrinsic(&self) -> &str; + + /// Builds an extrinsic. + /// + /// Will be called multiple times with increasing nonces. + fn build(&self, nonce: u32) -> std::result::Result; +} + +impl dyn ExtrinsicBuilder + '_ { + /// Name of this builder in CSV format: `pallet, extrinsic`. + pub fn name(&self) -> String { + format!("{}, {}", self.pallet(), self.extrinsic()) + } +} diff --git a/utils/frame/benchmarking-cli/src/extrinsic/mod.rs b/utils/frame/benchmarking-cli/src/extrinsic/mod.rs new file mode 100644 index 0000000000000..12a09c3b1af63 --- /dev/null +++ b/utils/frame/benchmarking-cli/src/extrinsic/mod.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmark the time it takes to execute a specific extrinsic. +//! This is a generalization of the *overhead* benchmark which can only measure `System::Remark` +//! extrinsics. + +pub mod bench; +pub mod cmd; +pub mod extrinsic_factory; + +pub use cmd::ExtrinsicCmd; +pub use extrinsic_factory::{ExtrinsicBuilder, ExtrinsicFactory}; diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index d0eee3d2939fc..96c7a997895d4 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -18,6 +18,7 @@ //! Contains the root [`BenchmarkCmd`] command and exports its sub-commands.
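A hedged sketch of how a downstream node would plug into the factory defined above. Only the `ExtrinsicBuilder`/`ExtrinsicFactory` surface is taken from the diff; `RemarkBuilder` and `signed_remark` are hypothetical, and the stripped return type of `build` is assumed to be `Result<OpaqueExtrinsic, &'static str>`.

struct RemarkBuilder;

impl ExtrinsicBuilder for RemarkBuilder {
	// Lookup keys; `try_get` lowercases its arguments, so keep these lowercase.
	fn pallet(&self) -> &str {
		"system"
	}

	fn extrinsic(&self) -> &str {
		"remark"
	}

	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
		// `signed_remark` is a hypothetical helper: a real node would sign a
		// `System::remark` call from a pre-funded dev account with this nonce.
		signed_remark(nonce)
	}
}

fn demo() {
	let factory = ExtrinsicFactory(vec![Box::new(RemarkBuilder)]);
	// Lookup is case-insensitive: "System"/"Remark" resolves to the builder above.
	assert!(factory.try_get("System", "Remark").is_some());
}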
mod block; +mod extrinsic; mod machine; mod overhead; mod pallet; @@ -25,8 +26,9 @@ mod shared; mod storage; pub use block::BlockCmd; +pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, Requirements, SUBSTRATE_REFERENCE_HARDWARE}; -pub use overhead::{ExtrinsicBuilder, OverheadCmd}; +pub use overhead::OverheadCmd; pub use pallet::PalletCmd; pub use storage::StorageCmd; @@ -41,8 +43,8 @@ pub enum BenchmarkCmd { Storage(StorageCmd), Overhead(OverheadCmd), Block(BlockCmd), - #[clap(hide = true)] // Hidden until fully completed. Machine(MachineCmd), + Extrinsic(ExtrinsicCmd), } /// Unwraps a [`BenchmarkCmd`] into its concrete sub-command. @@ -58,6 +60,7 @@ macro_rules! unwrap_cmd { BenchmarkCmd::Overhead($cmd) => $code, BenchmarkCmd::Block($cmd) => $code, BenchmarkCmd::Machine($cmd) => $code, + BenchmarkCmd::Extrinsic($cmd) => $code, } } } diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 357c89d97a5ac..28ceea63f1572 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -31,10 +31,11 @@ use serde::Serialize; use std::{fmt::Debug, sync::Arc}; use crate::{ - overhead::{ - bench::{Benchmark, BenchmarkParams, BenchmarkType}, - template::TemplateData, + extrinsic::{ + bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams}, + ExtrinsicBuilder, }, + overhead::template::TemplateData, shared::{HostInfoParams, WeightParams}, }; @@ -63,20 +64,20 @@ pub struct OverheadParams { #[allow(missing_docs)] #[clap(flatten)] - pub bench: BenchmarkParams, + pub bench: ExtrinsicBenchmarkParams, #[allow(missing_docs)] #[clap(flatten)] pub hostinfo: HostInfoParams, } -/// Used by the benchmark to build signed extrinsics. -/// -/// The built extrinsics only need to be valid in the first block -/// who's parent block is the genesis block. -pub trait ExtrinsicBuilder { - /// Build a `System::remark` extrinsic. - fn remark(&self, nonce: u32) -> std::result::Result; +/// Type of a benchmark. +#[derive(Serialize, Clone, PartialEq, Copy)] +pub(crate) enum BenchmarkType { + /// Measure the per-extrinsic execution overhead. + Extrinsic, + /// Measure the per-block execution overhead. 
+ Block, } impl OverheadCmd { @@ -89,7 +90,7 @@ impl OverheadCmd { cfg: Configuration, client: Arc, inherent_data: sp_inherents::InherentData, - ext_builder: Arc, + ext_builder: &dyn ExtrinsicBuilder, ) -> Result<()> where Block: BlockT, @@ -97,18 +98,21 @@ impl OverheadCmd { C: BlockBuilderProvider + ProvideRuntimeApi, C::Api: ApiExt + BlockBuilderApi, { - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, ext_builder); + if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { + return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); + } + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); // per-block execution overhead { - let stats = bench.bench(BenchmarkType::Block)?; + let stats = bench.bench_block()?; info!("Per-block execution overhead [ns]:\n{:?}", stats); let template = TemplateData::new(BenchmarkType::Block, &cfg, &self.params, &stats)?; template.write(&self.params.weight.weight_path)?; } // per-extrinsic execution overhead { - let stats = bench.bench(BenchmarkType::Extrinsic)?; + let stats = bench.bench_extrinsic(ext_builder)?; info!("Per-extrinsic execution overhead [ns]:\n{:?}", stats); let template = TemplateData::new(BenchmarkType::Extrinsic, &cfg, &self.params, &stats)?; template.write(&self.params.weight.weight_path)?; @@ -118,6 +122,24 @@ impl OverheadCmd { } } +impl BenchmarkType { + /// Short name of the benchmark type. + pub(crate) fn short_name(&self) -> &'static str { + match self { + Self::Extrinsic => "extrinsic", + Self::Block => "block", + } + } + + /// Long name of the benchmark type. + pub(crate) fn long_name(&self) -> &'static str { + match self { + Self::Extrinsic => "ExtrinsicBase", + Self::Block => "BlockExecution", + } + } +} + // Boilerplate impl CliConfiguration for OverheadCmd { fn shared_params(&self) -> &SharedParams { diff --git a/utils/frame/benchmarking-cli/src/overhead/mod.rs b/utils/frame/benchmarking-cli/src/overhead/mod.rs index abdeac22b7898..fc3db912f7a30 100644 --- a/utils/frame/benchmarking-cli/src/overhead/mod.rs +++ b/utils/frame/benchmarking-cli/src/overhead/mod.rs @@ -15,8 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod bench; pub mod cmd; -mod template; +pub mod template; -pub use cmd::{ExtrinsicBuilder, OverheadCmd}; +pub use cmd::OverheadCmd; diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs index 33c2c7999039a..aa82e45cf6db9 100644 --- a/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -27,7 +27,7 @@ use serde::Serialize; use std::{env, fs, path::PathBuf}; use crate::{ - overhead::{bench::BenchmarkType, cmd::OverheadParams}, + overhead::cmd::{BenchmarkType, OverheadParams}, shared::{Stats, UnderscoreHelper}, }; From 752980bf4a16488f0b37a4741bdefdd2f90e93bf Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 19 Jul 2022 15:47:51 +0300 Subject: [PATCH 051/135] Don't download artifacts into the `rusty-cachier-notify` job (#11850) --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fff11e07326d8..9608d88e9554d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -229,6 +229,7 @@ rusty-cachier-notify: variables: CI_IMAGE: paritytech/rusty-cachier-env:latest GIT_STRATEGY: none + dependencies: [] script: - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash - rusty-cachier cache notify From 7b81072843e584ef2bee449081aed5c963021ee9 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 20 Jul 2022 10:50:54 +0200 Subject: [PATCH 052/135] Bump wasmtime default-pages (#11864) Signed-off-by: Oliver Tale-Yazdi --- client/executor/wasmtime/src/runtime.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index fb5c6cb16993a..dcf07c454e46d 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -366,7 +366,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result Date: Wed, 20 Jul 2022 18:04:31 -0400 Subject: [PATCH 053/135] Make reading json genesis file faster (#11868) * Make reading json genesis file faster * Formatting * fmt --- client/chain-spec/src/chain_spec.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index efb40d46f216a..5aafc28524dbf 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -61,7 +61,16 @@ impl GenesisSource { let file = File::open(path).map_err(|e| { format!("Error opening spec file at `{}`: {}", path.display(), e) })?; - let genesis: GenesisContainer = json::from_reader(file) + // SAFETY: `mmap` is fundamentally unsafe since technically the file can change + // underneath us while it is mapped; in practice it's unlikely to be a + // problem + let bytes = unsafe { + memmap2::Mmap::map(&file).map_err(|e| { + format!("Error mmaping spec file `{}`: {}", path.display(), e) + })? 
+ }; + + let genesis: GenesisContainer = json::from_slice(&bytes) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) }, From 58a80806f01bab8bcf9da9be8afdfd70ad50e7fe Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 21 Jul 2022 11:36:00 +0300 Subject: [PATCH 054/135] Cleanup light client leftovers (#11865) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove --light cli option * Cleanup light client leftovers * Remove commented-out code and clean-up more light client leftovers * Fix formatting with `cargo +nightly fmt` * Remove FIXME regarding db directory structure Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/cli/src/commands/run_cmd.rs | 17 +--- client/cli/src/config.rs | 8 +- .../src/communication/gossip.rs | 14 +-- client/network/src/config.rs | 8 -- client/network/src/protocol/message.rs | 1 - client/network/src/service.rs | 15 +-- client/network/src/transactions.rs | 13 --- client/rpc-api/src/system/helpers.rs | 2 - client/service/src/builder.rs | 93 +++++++------------ client/service/src/lib.rs | 7 -- client/service/src/metrics.rs | 2 +- 11 files changed, 44 insertions(+), 136 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 12774eecc4174..99eaa4be1cd0f 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -50,10 +50,6 @@ pub struct RunCmd { #[clap(long)] pub no_grandpa: bool, - /// Experimental: Run in light client mode. - #[clap(long)] - pub light: bool, - /// Listen to all RPC interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC @@ -337,7 +333,7 @@ impl CliConfiguration for RunCmd { fn dev_key_seed(&self, is_dev: bool) -> Result> { Ok(self.get_keyring().map(|a| format!("//{}", a)).or_else(|| { - if is_dev && !self.light { + if is_dev { Some("//Alice".into()) } else { None @@ -363,16 +359,9 @@ impl CliConfiguration for RunCmd { fn role(&self, is_dev: bool) -> Result { let keyring = self.get_keyring(); - let is_light = self.light; - let is_authority = (self.validator || is_dev || keyring.is_some()) && !is_light; + let is_authority = self.validator || is_dev || keyring.is_some(); - Ok(if is_light { - sc_service::Role::Light - } else if is_authority { - sc_service::Role::Authority - } else { - sc_service::Role::Full - }) + Ok(if is_authority { sc_service::Role::Authority } else { sc_service::Role::Full }) } fn force_authoring(&self) -> Result { diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 01c541bf75255..4ebbc8c72c19a 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -211,12 +211,8 @@ pub trait CliConfiguration: Sized { base_path: &PathBuf, cache_size: usize, database: Database, - role: &Role, ) -> Result { - let role_dir = match role { - Role::Light => "light", - Role::Full | Role::Authority => "full", - }; + let role_dir = "full"; let rocksdb_path = base_path.join("db").join(role_dir); let paritydb_path = base_path.join("paritydb").join(role_dir); Ok(match database { @@ -536,7 +532,7 @@ pub trait CliConfiguration: Sized { )?, keystore_remote, keystore, - database: self.database_config(&config_dir, database_cache_size, database, &role)?, + database: self.database_config(&config_dir, database_cache_size, database)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, state_pruning: self.state_pruning()?, diff --git 
a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 250e640cbf037..65d7dfb783aa3 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -728,11 +728,7 @@ type MaybeMessage = Option<(Vec, NeighborPacket> impl Inner { fn new(config: crate::Config) -> Self { - let catch_up_config = if config.local_role.is_light() { - // if we are a light client we shouldn't be issuing any catch-up requests - // as we don't participate in the full GRANDPA protocol - CatchUpConfig::disabled() - } else if config.observer_enabled { + let catch_up_config = if config.observer_enabled { if config.local_role.is_authority() { // since the observer protocol is enabled, we will only issue // catch-up requests if we are an authority (and only to other @@ -1231,10 +1227,6 @@ impl Inner { None => return false, }; - if self.config.local_role.is_light() { - return false - } - if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { self.peers.first_stage_peers.contains(who) } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { @@ -1266,10 +1258,6 @@ impl Inner { None => return false, }; - if self.config.local_role.is_light() { - return false - } - if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { self.peers.first_stage_peers.contains(who) || self.peers.second_stage_peers.contains(who) || diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 430efd697a18c..58349fe973330 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -147,8 +147,6 @@ where pub enum Role { /// Regular full node. Full, - /// Regular light node. - Light, /// Actual authority. Authority, } @@ -158,18 +156,12 @@ impl Role { pub fn is_authority(&self) -> bool { matches!(self, Self::Authority { .. }) } - - /// True for [`Role::Light`]. - pub fn is_light(&self) -> bool { - matches!(self, Self::Light { .. }) - } } impl fmt::Display for Role { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Full => write!(f, "FULL"), - Self::Light => write!(f, "LIGHT"), Self::Authority { .. } => write!(f, "AUTHORITY"), } } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index c9512f82e23bb..50c4a264a5f95 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -107,7 +107,6 @@ pub mod generic { fn from(roles: &'a crate::config::Role) -> Self { match roles { crate::config::Role::Full => Self::FULL, - crate::config::Role::Light => Self::LIGHT, crate::config::Role::Authority { .. 
} => Self::AUTHORITY, } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 0d28b50df1821..bc6cfc73bec36 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -245,15 +245,11 @@ where }; let chain_sync = (params.create_chain_sync)( - if params.role.is_light() { - SyncMode::Light - } else { - match params.network_config.sync_mode { - config::SyncMode::Full => SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - SyncMode::LightState { skip_proofs, storage_chain_mode }, - config::SyncMode::Warp => SyncMode::Warp, - } + match params.network_config.sync_mode { + config::SyncMode::Full => SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => SyncMode::Warp, }, params.chain.clone(), warp_sync_provider, @@ -489,7 +485,6 @@ where let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( service.clone(), - params.role, params.transaction_pool, params.metrics_registry.as_ref(), )?; diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 1f54f05d7446f..043bdeff7ebfc 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -83,8 +83,6 @@ mod rep { pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); /// Reputation change when a peer sends us a bad transaction. pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); - /// We received an unexpected transaction packet. - pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); } struct Metrics { @@ -160,7 +158,6 @@ impl TransactionsHandlerPrototype { pub fn build( self, service: Arc>, - local_role: config::Role, transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { @@ -178,7 +175,6 @@ impl TransactionsHandlerPrototype { event_stream, peers: HashMap::new(), transaction_pool, - local_role, from_controller, metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) @@ -247,7 +243,6 @@ pub struct TransactionsHandler { peers: HashMap>, transaction_pool: Arc>, gossip_enabled: Arc, - local_role: config::Role, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. 
metrics: Option, @@ -360,14 +355,6 @@ impl TransactionsHandler { /// Called when peer sends us new transactions fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { - // sending transaction to light node is considered a bad behavior - if matches!(self.local_role, config::Role::Light) { - debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who); - self.service.disconnect_peer(who, self.protocol_name.clone()); - self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); - return - } - // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { trace!(target: "sync", "{} Ignoring transactions while disabled", who); diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index e17e0f518c3e6..4561fccc1e81b 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -76,8 +76,6 @@ pub struct PeerInfo { pub enum NodeRole { /// The node is a full node Full, - /// The node is a light client - LightClient, /// The node is an authority Authority, } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index fe0ae5db53e70..0a18943f45006 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,17 +37,12 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{ - config::{Role, SyncMode}, - NetworkService, -}; +use sc_network::{config::SyncMode, NetworkService}; use sc_network_common::sync::warp::WarpSyncProvider; -use sc_network_light::light_client_requests::{self, handler::LightClientRequestHandler}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::{self, BlockRequestHandler}, - state_request_handler::{self, StateRequestHandler}, - warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler}, - ChainSync, + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, }; use sc_rpc::{ author::AuthorApiServer, @@ -731,11 +726,8 @@ where } } - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !matches!(config.role, Role::Light), - pool: transaction_pool, - client: client.clone(), - }); + let transaction_pool_adapter = + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); let protocol_id = config.protocol_id(); @@ -746,63 +738,42 @@ where }; let block_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - block_request_handler::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = BlockRequestHandler::new( - &protocol_id, - client.clone(), - config.network.default_peers_set.in_peers as usize + - config.network.default_peers_set.out_peers as usize, - ); - spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. 
+ let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + ); + spawn_handle.spawn("block-request-handler", Some("networking"), handler.run()); + protocol_config }; let state_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - state_request_handler::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = StateRequestHandler::new( - &protocol_id, - client.clone(), - config.network.default_peers_set_num_full as usize, - ); - spawn_handle.spawn("state-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + config.network.default_peers_set_num_full as usize, + ); + spawn_handle.spawn("state-request-handler", Some("networking"), handler.run()); + protocol_config }; let warp_sync_params = warp_sync.map(|provider| { - let protocol_config = if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - warp_request_handler::generate_request_response_config(protocol_id.clone()) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); - spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); - protocol_config - }; + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = + WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); (provider, protocol_config) }); let light_client_request_protocol_config = { - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - light_client_requests::generate_protocol_config(&protocol_id) - } else { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); - spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); - protocol_config - } + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); + spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); + protocol_config }; let max_parallel_downloads = config.network.max_parallel_downloads; diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 24ba670cfcd65..1b70227a70488 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -253,7 +253,6 @@ async fn build_network_future< let node_role = match role { Role::Authority { .. } => NodeRole::Authority, - Role::Light => NodeRole::LightClient, Role::Full => NodeRole::Full, }; @@ -377,7 +376,6 @@ where /// Transaction pool adapter. pub struct TransactionPoolAdapter { - imports_external_transactions: bool, pool: Arc
, client: Arc, } @@ -425,11 +423,6 @@ where } fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture { - if !self.imports_external_transactions { - debug!("Transaction rejected"); - return Box::pin(futures::future::ready(TransactionImport::None)) - } - let encoded = transaction.encode(); let uxt = match Decode::decode(&mut &encoded[..]) { Ok(uxt) => uxt, diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 56b145ccdf7f7..ef3132f61ab99 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -160,7 +160,7 @@ impl MetricsService { ) -> Result { let role_bits = match config.role { Role::Full => 1u64, - Role::Light => 2u64, + // 2u64 used to represent light client role Role::Authority { .. } => 4u64, }; From cac91f59b9e3fb8fd59842c023f87b4206931993 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Thu, 21 Jul 2022 12:11:19 +0300 Subject: [PATCH 055/135] Stop RPC servers on drop (#11679) * Stop RPC servers on drop * Switch to existing wrappers that stop RPC servers * Apply formatting --- client/service/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1b70227a70488..2e7b611ffb187 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -369,7 +369,8 @@ where match tokio::task::block_in_place(|| { config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) }) { - Ok((http, ws)) => Ok(Box::new((http, ws))), + Ok((http, ws)) => + Ok(Box::new((waiting::HttpServer(Some(http)), waiting::WsServer(Some(ws))))), Err(e) => Err(Error::Application(e)), } } From b28bf15e5160f33e257e2cc6066bdd6c55a3eb42 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 21 Jul 2022 15:35:18 +0200 Subject: [PATCH 056/135] [ci] Generate rustdocs without dependencies (#11885) --- scripts/ci/gitlab/pipeline/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 25ecad5bc5264..eedd2ee0bb409 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -149,7 +149,7 @@ build-rustdoc: - ./crate-docs/ script: - rusty-cachier snapshot create - - time cargo +nightly doc --workspace --all-features --verbose + - time cargo +nightly doc --workspace --all-features --verbose --no-deps - rm -f $CARGO_TARGET_DIR/doc/.lock - mv $CARGO_TARGET_DIR/doc ./crate-docs # FIXME: remove me after CI image gets nonroot From 1399afdcd8ab4d7fad149c96f9cadcaf04b94f86 Mon Sep 17 00:00:00 2001 From: Robert Hambrock Date: Thu, 21 Jul 2022 17:09:21 +0200 Subject: [PATCH 057/135] pallet-mmr: fix batch proof failures (#11840) * pallet-mmr: extend batch proof verification test covers all possible 2-leaf combinations now, including current verification failures where the batch proof item count limit is sometimes too low. * raise upper bound on proof item number as described in https://github.com/paritytech/substrate/issues/11753#issuecomment-1179838174 * test for powerset of leaves * refactor batch proof verification test * test all batch proofs for mmr sizes up to n=13 * limit mmr size to reduce batch proof test duration * use saturating integer addition for proof check * extract common chain building in batch proof tests note: right now, since the old chain is not killed, it keeps growing by 7 blocks for every leaf selection (added after proof generation), hence heavier to compute.
* only add blocks after a proof generation once * increase batch proof testing range * register offchain extensions only once * fmt & remove unused util --- Cargo.lock | 1 + frame/merkle-mountain-range/Cargo.toml | 1 + frame/merkle-mountain-range/src/lib.rs | 2 +- .../merkle-mountain-range/src/mmr/storage.rs | 2 +- frame/merkle-mountain-range/src/mmr/utils.rs | 22 +----- frame/merkle-mountain-range/src/tests.rs | 70 ++++++++++++++----- 6 files changed, 58 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 279864e056b0b..3d5bfe5e51201 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5871,6 +5871,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "itertools", "parity-scale-codec", "scale-info", "sp-core", diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index f5f8bdee0855d..75301afed0094 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -27,6 +27,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] env_logger = "0.9" hex-literal = "0.3" +itertools = "0.10.3" [features] default = ["std"] diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 9274e8e72c508..4644ebcb7da1c 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -351,7 +351,7 @@ impl, I: 'static> Pallet { ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || - proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() + (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count { return Err(primitives::Error::Verify .log_debug("The proof has incorrect number of leaves or proof items.")) diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index d31262d4d7f2f..8b623edf56957 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -136,7 +136,7 @@ where let window_size = ::BlockHashCount::get().unique_saturated_into(); if leaves >= window_size { - // Move the rolling window towards the end of `block_num->hash` mappings available + // Move the rolling window towards the end of `block_num->hash` mappings available // in the runtime: we "canonicalize" the leaf at the end, let to_canon_leaf = leaves.saturating_sub(window_size); // and all the nodes added by that leaf. diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs index 3734ea514782d..0b8e88a9283da 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -46,15 +46,6 @@ impl NodesUtils { 2 * self.no_of_leaves - self.number_of_peaks() } - /// Calculate maximal depth of the MMR. - pub fn depth(&self) -> u32 { - if self.no_of_leaves == 0 { - return 0 - } - - 64 - self.no_of_leaves.next_power_of_two().leading_zeros() - } - /// Calculate `LeafIndex` for the leaf that added `node_index` to the MMR. 
pub fn leaf_index_that_added_node(node_index: NodeIndex) -> LeafIndex { let rightmost_leaf_pos = Self::rightmost_leaf_node_index_from_pos(node_index); @@ -128,18 +119,7 @@ mod tests { } #[test] - fn should_calculate_number_of_leaves_correctly() { - assert_eq!( - vec![0, 1, 2, 3, 4, 9, 15, 21] - .into_iter() - .map(|n| NodesUtils::new(n).depth()) - .collect::>(), - vec![0, 1, 2, 3, 3, 5, 5, 6] - ); - } - - #[test] - fn should_calculate_depth_correclty() { + fn should_calculate_depth_correctly() { assert_eq!( vec![0, 1, 2, 3, 4, 9, 15, 21] .into_iter() diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 53226f8419988..566a051823d5e 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -347,28 +347,64 @@ fn should_verify() { } #[test] -fn should_verify_batch_proof() { +fn should_verify_batch_proofs() { + fn generate_and_verify_batch_proof( + ext: &mut sp_io::TestExternalities, + leaves: &Vec, + blocks_to_add: usize, + ) { + let (leaves, proof) = ext + .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); + + ext.execute_with(|| { + add_blocks(blocks_to_add); + // then + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + }) + } + let _ = env_logger::try_init(); - // Start off with chain initialisation and storing indexing data off-chain - // (MMR Leafs) - let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); - ext.persist_offchain_overlay(); + use itertools::Itertools; - // Try to generate proof now. This requires the offchain extensions to be present - // to retrieve full leaf data. + let mut ext = new_test_ext(); + // require the offchain extensions to be present + // to retrieve full leaf data when generating proofs register_offchain_ext(&mut ext); - let (leaves, proof) = ext.execute_with(|| { - // when - crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() - }); - ext.execute_with(|| { - add_blocks(7); - // then - assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); - }); + // verify that up to n=10, valid proofs are generated for all possible leaf combinations + for n in 0..10 { + ext.execute_with(|| new_block()); + ext.persist_offchain_overlay(); + + // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); + + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + ext.persist_offchain_overlay(); + }); + } + + // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations + for n in 10..15 { + // (MMR Leafs) + ext.execute_with(|| new_block()); + ext.persist_offchain_overlay(); + + // generate all possible 2-leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); + + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + ext.persist_offchain_overlay(); + }); + } + + generate_and_verify_batch_proof(&mut ext, &vec![7, 11], 20); + ext.execute_with(|| add_blocks(1000)); + ext.persist_offchain_overlay(); + generate_and_verify_batch_proof(&mut ext, &vec![7, 11, 100, 800], 100); } #[test] From dbdbef95c84644328bee60bd170d79814c04e32b Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 22 Jul 2022 15:08:52 +0300 Subject: [PATCH 058/135] Clarify wrong bootnode peer ID error message (#11872) --- client/network/src/service.rs 
| 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index bc6cfc73bec36..aeb5e25497bb3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -2020,12 +2020,16 @@ where if this.boot_node_ids.contains(&peer_id) { if let DialError::WrongPeerId { obtained, endpoint } = &error { - error!( - "💔 The bootnode you want to connect provided a different peer ID than the one you expect: `{}` with `{}`:`{:?}`.", - peer_id, - obtained, - endpoint, - ); + if let ConnectedPoint::Dialer { address, role_override: _ } = + endpoint + { + error!( + "💔 The bootnode you want to connect to at `{}` provided a different peer ID `{}` than the one you expect `{}`.", + address, + obtained, + peer_id, + ); + } } } } From 7bb4936d788c84bfd9ed1bbde49336538459c5aa Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Sat, 23 Jul 2022 20:01:57 +0200 Subject: [PATCH 059/135] Fix for 'note_applied_extrinsic' - consider also 'extract_actual_pays_fee' (#11849) --- frame/support/src/weights.rs | 37 +++++++++++ frame/system/src/lib.rs | 5 +- frame/system/src/tests.rs | 119 ++++++++++++++++++++++++++++++++++- 3 files changed, 157 insertions(+), 4 deletions(-) diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 24240c7cc4e6a..dd49409ccc912 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -358,6 +358,15 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default pays_fee. +pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, + } + .pays_fee(info) +} + impl From<(Option, Pays)> for PostDispatchInfo { fn from(post_weight_info: (Option, Pays)) -> Self { let (actual_weight, pays_fee) = post_weight_info; @@ -956,6 +965,34 @@ mod tests { ); } + #[test] + fn extract_actual_pays_fee_works() { + let pre = DispatchInfo { weight: 1000, ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); + assert_eq!( + extract_actual_pays_fee(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, + error: DispatchError::BadOrigin, + }), + &pre + ), + Pays::No + ); + + let pre = DispatchInfo { weight: 1000, pays_fee: Pays::No, ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); + } + type Balance = u64; // 0.5x^3 + 2.333x^2 + 7x - 10_000 diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 3c6f514808b5d..94605c2da59bd 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -91,8 +91,8 @@ use frame_support::{ OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, }, weights::{ - extract_actual_weight,
DispatchClass, DispatchInfo, PerDispatchClass, RuntimeDbWeight, - Weight, + extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, + PerDispatchClass, RuntimeDbWeight, Weight, }, Parameter, }; @@ -1500,6 +1500,7 @@ impl Pallet { /// To be called immediately after an extrinsic has been applied. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { info.weight = extract_actual_weight(r, &info); + info.pays_fee = extract_actual_pays_fee(r, &info); Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 0facd796b2a0c..417dca12045ee 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -17,7 +17,9 @@ use crate::*; use frame_support::{ - assert_noop, assert_ok, dispatch::PostDispatchInfo, weights::WithPostDispatchInfo, + assert_noop, assert_ok, + dispatch::PostDispatchInfo, + weights::{Pays, WithPostDispatchInfo}, }; use mock::{Origin, *}; use sp_core::H256; @@ -216,7 +218,7 @@ fn deposit_event_should_work() { } #[test] -fn deposit_event_uses_actual_weight() { +fn deposit_event_uses_actual_weight_and_pays_fee() { new_test_ext().execute_with(|| { System::reset_events(); System::initialize(&1, &[0u8; 32].into(), &Default::default()); @@ -230,8 +232,34 @@ fn deposit_event_uses_actual_weight() { &Ok(Some(1200).into()), pre_info, ); + System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::Yes).into()), pre_info); + System::note_applied_extrinsic(&Ok(Pays::No.into()), pre_info); + System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::No).into()), pre_info); + System::note_applied_extrinsic(&Ok((Some(500), Pays::No).into()), pre_info); System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::Yes }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); + System::note_applied_extrinsic( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::No }, + error: DispatchError::BadOrigin, + }), + pre_info, + ); + assert_eq!( System::events(), vec![ @@ -261,6 +289,54 @@ fn deposit_event_uses_actual_weight() { }, EventRecord { phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 500, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), event: SysEvent::ExtrinsicFailed { 
dispatch_error: DispatchError::BadOrigin.into(), dispatch_info: DispatchInfo { weight: 999, ..Default::default() }, @@ -268,6 +344,45 @@ fn deposit_event_uses_actual_weight() { .into(), topics: vec![] }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 800, + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 800, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, ] ); }); From ca570cffee47d9fa4806715b0e8b284ec6db7e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 23 Jul 2022 21:28:25 +0200 Subject: [PATCH 060/135] pallet-assets: Update docs (#11877) --- frame/assets/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 3f9146c5d229b..e0b00c5642c81 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -79,8 +79,6 @@ //! * `create`: Creates a new asset class, taking the required deposit. //! * `transfer`: Transfer sender's assets to another account. //! * `transfer_keep_alive`: Transfer sender's assets to another account, keeping the sender alive. -//! * `set_metadata`: Set the metadata of an asset class. -//! * `clear_metadata`: Remove the metadata of an asset class. //! * `approve_transfer`: Create or increase an delegated transfer. //! * `cancel_approval`: Rescind a previous approval. //! * `transfer_approved`: Transfer third-party's assets to another account. @@ -103,6 +101,8 @@ //! * `transfer_ownership`: Changes an asset class's Owner; called by the asset class's Owner. //! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's //! Owner. +//! * `set_metadata`: Set the metadata of an asset class; called by the asset class's Owner. +//! * `clear_metadata`: Remove the metadata of an asset class; called by the asset class's Owner. //! //! Please refer to the [`Call`] enum and its associated variants for documentation on each //! function. From fe3437e7ffd3207199f140ca14558f303567c955 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Mon, 25 Jul 2022 05:42:49 +0900 Subject: [PATCH 061/135] Fix typo in limits.rs (#11894) alway -> always --- frame/system/src/limits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index d3c108afb6f32..6076414ba6bcb 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -369,7 +369,7 @@ impl BlockWeightsBuilder { /// /// This is to make sure that extrinsics don't stay forever in the pool, /// because they could seamingly fit the block (since they are below `max_block`), - /// but the cost of calling `on_initialize` alway prevents them from being included. + /// but the cost of calling `on_initialize` always prevents them from being included. 
pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { self.init_cost = Some(init_cost); self From 64d77551cb9ae0a9572a955d8a678d0de41af4bf Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 25 Jul 2022 11:15:47 +0200 Subject: [PATCH 062/135] [ci] fix node-bench-regression-guard job (#11901) * [Do not merge] [ci] debug node-bench-regression-guard * debug * fix artifacts path * add debug job * debug * debug * fix job, return pipeline --- scripts/ci/gitlab/pipeline/test.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index dcec896ddc18b..d900d425ad494 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -129,7 +129,11 @@ node-bench-regression-guard: artifacts: true # polls artifact from master to compare with current result - project: $CI_PROJECT_PATH - job: cargo-check-benches + job: "cargo-check-benches 1/2" + ref: master + artifacts: true + - project: $CI_PROJECT_PATH + job: "cargo-check-benches 2/2" ref: master artifacts: true variables: From 618f26bc9fe1a5ae8ee0a485e4e24da3e5979af6 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 25 Jul 2022 12:19:43 +0200 Subject: [PATCH 063/135] Remove unused leaves-set fields (#11895) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove unused leaves-set fields * Fix undo_import method Old leaf should be inserted using the displaced number * Fix leaves count * Apply code review suggestions Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/api/src/leaves.rs | 81 +++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 5859290777433..2e5d4be3a5462 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -67,8 +67,6 @@ impl FinalizationDisplaced { #[derive(Debug, Clone, PartialEq, Eq)] pub struct LeafSet { storage: BTreeMap, Vec>, - pending_added: Vec<(H, N)>, - pending_removed: Vec, } impl LeafSet where { /// Construct a new, blank leaf set. pub fn new() -> Self { - Self { storage: BTreeMap::new(), pending_added: Vec::new(), pending_removed: Vec::new() } + Self { storage: BTreeMap::new() } } /// Read the leaf list from the DB, using given prefix for keys. @@ -97,21 +95,21 @@ where }, None => {}, } - Ok(Self { storage, pending_added: Vec::new(), pending_removed: Vec::new() }) + Ok(Self { storage }) } - /// update the leaf list on import. returns a displaced leaf if there was one. + /// Update the leaf list on import. + /// Returns a displaced leaf if there was one. pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { // avoid underflow for genesis.
let displaced = if number != N::zero() { - let new_number = Reverse(number.clone() - N::one()); - let was_displaced = self.remove_leaf(&new_number, &parent_hash); + let parent_number = Reverse(number.clone() - N::one()); + let was_displaced = self.remove_leaf(&parent_number, &parent_hash); if was_displaced { - self.pending_removed.push(parent_hash.clone()); Some(ImportDisplaced { new_hash: hash.clone(), - displaced: LeafSetItem { hash: parent_hash, number: new_number }, + displaced: LeafSetItem { hash: parent_hash, number: parent_number }, }) } else { None @@ -121,7 +119,6 @@ where }; self.insert_leaf(Reverse(number.clone()), hash.clone()); - self.pending_added.push((hash, number)); displaced } @@ -140,8 +137,6 @@ where }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed - .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); FinalizationDisplaced { leaves: below_boundary } } @@ -188,8 +183,6 @@ where self.remove_leaf(number, hash), "item comes from an iterator over storage; qed", ); - - self.pending_removed.push(hash.clone()); } } @@ -203,7 +196,6 @@ where // this is an invariant of regular block import. if !leaves_contains_best { self.insert_leaf(best_number.clone(), best_hash.clone()); - self.pending_added.push((best_hash, best_number.0)); } } @@ -213,9 +205,9 @@ where self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect() } - /// Number of known leaves + /// Number of known leaves. pub fn count(&self) -> usize { - self.storage.len() + self.storage.values().map(|level| level.len()).sum() } /// Write the leaf list to the database transaction. @@ -227,8 +219,6 @@ where ) { let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); - self.pending_added.clear(); - self.pending_removed.clear(); } /// Check if given block is a leaf. @@ -242,7 +232,7 @@ where self.storage.entry(number).or_insert_with(Vec::new).push(hash); } - // returns true if this leaf was contained, false otherwise. + // Returns true if this leaf was contained, false otherwise. fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { let mut empty = false; let removed = self.storage.get_mut(number).map_or(false, |leaves| { @@ -285,7 +275,7 @@ where pub fn undo_import(&mut self, displaced: ImportDisplaced) { let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); self.inner.remove_leaf(&new_number, &displaced.new_hash); - self.inner.insert_leaf(new_number, displaced.displaced.hash); + self.inner.insert_leaf(displaced.displaced.number, displaced.displaced.hash); } /// Undo a finalization operation by providing the displaced leaves. 
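The `count` rewrite above becomes observable as soon as two leaves share a height; a minimal illustration in the style of the tests further down, using integer block hashes and only the public `LeafSet` API from this diff:

let mut set = LeafSet::new();
set.import(1_1u32, 1u32, 0u32);
set.import(1_2, 1, 0); // sibling leaf at the same height
// One `BTreeMap` level holding two hashes: the old `storage.len()` reported 1,
// the per-level sum now reports 2.
assert_eq!(set.count(), 2);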
@@ -294,13 +284,6 @@ where } } -impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> { - fn drop(&mut self) { - self.inner.pending_added.clear(); - self.inner.pending_removed.clear(); - } -} - #[cfg(test)] mod tests { use super::*; @@ -315,15 +298,20 @@ mod tests { set.import(2_1, 2, 1_1); set.import(3_1, 3, 2_1); + assert_eq!(set.count(), 1); assert!(set.contains(3, 3_1)); assert!(!set.contains(2, 2_1)); assert!(!set.contains(1, 1_1)); assert!(!set.contains(0, 0)); set.import(2_2, 2, 1_1); + set.import(1_2, 1, 0); + set.import(2_3, 2, 1_2); + assert_eq!(set.count(), 3); assert!(set.contains(3, 3_1)); assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); } #[test] @@ -392,17 +380,42 @@ mod tests { } #[test] - fn undo_finalization() { + fn undo_import() { let mut set = LeafSet::new(); set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + + let displaced = set.import(12_1, 12, 11_1).unwrap(); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_2)); + assert!(set.contains(12, 12_1)); + + set.undo().undo_import(displaced); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + } + + #[test] + fn undo_finalization() { + let mut set = LeafSet::new(); + set.import(9_1u32, 9u32, 0u32); + set.import(10_1, 10, 9_1); + set.import(10_2, 10, 9_1); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_2); let displaced = set.finalize_height(11); - assert!(!set.contains(10, 10_1)); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); set.undo().undo_finalization(displaced); - assert!(set.contains(10, 10_1)); + assert_eq!(set.count(), 3); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert!(set.contains(10, 10_2)); } } From c38d02bfa2e5cc11e905bc1bc1ded8529f445f61 Mon Sep 17 00:00:00 2001 From: Falco Hirschenberger Date: Mon, 25 Jul 2022 16:53:12 +0200 Subject: [PATCH 064/135] Add era to `Unbonded` event (#11770) * Add era to `Unbonded` event fixes #11749 * Fix missing tests * Add comment for `era` field in `Unbonded` struct. Co-authored-by: parity-processbot <> --- frame/nomination-pools/src/lib.rs | 4 +- frame/nomination-pools/src/tests.rs | 131 ++++++++++++------ .../nomination-pools/test-staking/src/lib.rs | 60 ++++++-- 3 files changed, 134 insertions(+), 61 deletions(-) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 514267e8bf4af..203c13f2f3496 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1349,7 +1349,7 @@ pub mod pallet { /// pool. /// - `points` is the number of points that are issued as a result of `balance` being /// dissolved into the corresponding unbonding pool. - /// + /// - `era` is the era in which the balance will be unbonded. /// In the absence of slashing, these values will match. In the presence of slashing, the /// number of points that are issued in the unbonding pool will be less than the amount /// requested to be unbonded. @@ -1358,6 +1358,7 @@ pub mod pallet { pool_id: PoolId, balance: BalanceOf, points: BalanceOf, + era: EraIndex, }, /// A member has withdrawn from their pool. /// @@ -1683,6 +1684,7 @@ pub mod pallet { pool_id: member.pool_id, points: points_unbonded, balance: unbonding_balance, + era: unbond_era, }); // Now that we know everything has worked write the items to storage. 
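Downstream consumers matching on `Unbonded` now have to pick up the extra field; a hedged sketch of an event handler (the variant shape comes from the lib.rs hunk above, while `Runtime` and the handler body are illustrative):

fn on_pool_event(event: pallet_nomination_pools::Event<Runtime>) {
	use pallet_nomination_pools::Event;
	match event {
		Event::Unbonded { member, pool_id, balance, points, era } => {
			// `era` tells the observer when the unbonding funds become
			// withdrawable, without re-deriving it from the bonding duration.
			log::info!(
				"member {:?} unbonded {:?} ({:?} points) from pool {}; withdrawable at era {}",
				member, balance, points, pool_id, era,
			);
		},
		_ => (),
	}
}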
diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index c971239bef507..b59f18cca72a2 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -869,7 +869,7 @@ mod claim_payout { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 11, pool_id: 1, bonded: 11, joined: true }, - Event::Unbonded { member: 11, pool_id: 1, points: 11, balance: 11 } + Event::Unbonded { member: 11, pool_id: 1, points: 11, balance: 11, era: 3 } ] ); }); @@ -1374,7 +1374,7 @@ mod claim_payout { assert_eq!( pool_events_since_last_call(), vec![ - Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 3 }, Event::PaidOut { member: 10, pool_id: 1, payout: 50 }, Event::PaidOut { member: 20, pool_id: 1, payout: 50 }, ] @@ -1808,12 +1808,12 @@ mod claim_payout { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, Event::Bonded { member: 30, pool_id: 1, bonded: 20, joined: true }, - Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 3 }, Event::PaidOut { member: 30, pool_id: 1, payout: 15 }, - Event::Unbonded { member: 30, pool_id: 1, balance: 10, points: 10 }, - Event::Unbonded { member: 30, pool_id: 1, balance: 5, points: 5 }, + Event::Unbonded { member: 30, pool_id: 1, balance: 10, points: 10, era: 3 }, + Event::Unbonded { member: 30, pool_id: 1, balance: 5, points: 5, era: 3 }, Event::PaidOut { member: 20, pool_id: 1, payout: 7 }, - Event::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5, era: 3 }, Event::PaidOut { member: 10, pool_id: 1, payout: 7 } ] ); @@ -1865,10 +1865,10 @@ mod claim_payout { pool_events_since_last_call(), vec![ Event::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, - Event::Unbonded { member: 20, pool_id: 1, balance: 20, points: 20 }, + Event::Unbonded { member: 20, pool_id: 1, balance: 20, points: 20, era: 3 }, Event::Withdrawn { member: 20, pool_id: 1, balance: 20, points: 20 }, Event::MemberRemoved { pool_id: 1, member: 20 }, - Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 6 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 10, points: 10 }, Event::MemberRemoved { pool_id: 1, member: 10 }, Event::Destroyed { pool_id: 1 } @@ -2269,7 +2269,7 @@ mod unbond { Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, Event::PaidOut { member: 40, pool_id: 1, payout: 40 }, - Event::Unbonded { member: 40, pool_id: 1, points: 6, balance: 6 } + Event::Unbonded { member: 40, pool_id: 1, points: 6, balance: 6, era: 3 } ] ); @@ -2311,7 +2311,13 @@ mod unbond { pool_events_since_last_call(), vec![ Event::PaidOut { member: 550, pool_id: 1, payout: 550 }, - Event::Unbonded { member: 550, pool_id: 1, points: 92, balance: 92 } + Event::Unbonded { + member: 550, + pool_id: 1, + points: 92, + balance: 92, + era: 3 + } ] ); @@ -2349,7 +2355,7 @@ mod unbond { Event::Withdrawn { member: 550, pool_id: 1, points: 92, balance: 92 }, Event::MemberRemoved { pool_id: 1, member: 550 }, Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, 
- Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2 } + Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2, era: 6 } ] ); }); @@ -2395,7 +2401,7 @@ mod unbond { vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10 } + Event::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 } ] ); }); @@ -2430,7 +2436,13 @@ mod unbond { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, Event::Bonded { member: 200, pool_id: 1, bonded: 200, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, + Event::Unbonded { + member: 100, + pool_id: 1, + points: 100, + balance: 100, + era: 3 + }, ] ); @@ -2439,7 +2451,13 @@ mod unbond { assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 200, pool_id: 1, points: 200, balance: 200 }] + vec![Event::Unbonded { + member: 200, + pool_id: 1, + points: 200, + balance: 200, + era: 3 + }] ); assert_eq!( @@ -2508,7 +2526,7 @@ mod unbond { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 } + Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100, era: 3 } ] ); @@ -2659,7 +2677,7 @@ mod unbond { vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 } + Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 3 } ] ); @@ -2685,7 +2703,7 @@ mod unbond { ); assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 }] + vec![Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5, era: 3 }] ); // when: casual further unbond, next era. 
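The new `era` field in these assertions records when the unbonding chunk becomes withdrawable. A minimal sketch of the relationship the expected values follow, assuming it is the era of the unbond plus the configured bonding duration (3 in the pallet mock, 28 in the test-staking runtime further down); the pallet derives the actual value through the staking interface, which this diff does not show:

```rust
// Sketch only: the asserted `era` is the unbonding era plus the
// configured bonding duration.
fn expected_unbond_era(current_era: u32, bonding_duration: u32) -> u32 {
    current_era + bonding_duration
}

#[test]
fn era_field_matches_bonding_duration() {
    assert_eq!(expected_unbond_era(0, 3), 3); // unbond in era 0 -> `era: 3`
    assert_eq!(expected_unbond_era(1, 3), 4); // the "casual further unbond" in the next hunk
    assert_eq!(expected_unbond_era(99, 28), 127); // the `pool_slash_proportional` case below
}
```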
@@ -2712,7 +2730,7 @@ mod unbond { ); assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 }] + vec![Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 4 }] ); // when: unbonding more than our active: error @@ -2747,7 +2765,7 @@ mod unbond { ); assert_eq!( pool_events_since_last_call(), - vec![Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 }] + vec![Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3, era: 4 }] ); }); } @@ -2792,9 +2810,9 @@ mod unbond { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, - Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2 }, - Event::Unbonded { member: 20, pool_id: 1, balance: 3, points: 3 }, - Event::Unbonded { member: 20, pool_id: 1, balance: 1, points: 1 } + Event::Unbonded { member: 20, pool_id: 1, points: 2, balance: 2, era: 3 }, + Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3, era: 4 }, + Event::Unbonded { member: 20, pool_id: 1, points: 1, balance: 1, era: 5 } ] ); }) @@ -2827,7 +2845,7 @@ mod unbond { vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3 } + Event::Unbonded { member: 10, pool_id: 1, points: 3, balance: 3, era: 3 } ] ); }); @@ -2873,7 +2891,7 @@ mod unbond { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true }, Event::PaidOut { member: 20, pool_id: 1, payout: 10 }, - Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2 } + Event::Unbonded { member: 20, pool_id: 1, balance: 2, points: 2, era: 3 } ] ); @@ -2889,7 +2907,7 @@ mod unbond { vec![ // 2/3 of ed, which is 20's share. 
Event::PaidOut { member: 20, pool_id: 1, payout: 6 }, - Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3 } + Event::Unbonded { member: 20, pool_id: 1, points: 3, balance: 3, era: 4 } ] ); @@ -2904,7 +2922,7 @@ mod unbond { pool_events_since_last_call(), vec![ Event::PaidOut { member: 20, pool_id: 1, payout: 3 }, - Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5 } + Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 5 } ] ); @@ -3003,7 +3021,6 @@ mod withdraw_unbonded { with_era: Default::default() } ); - assert_eq!( pool_events_since_last_call(), vec![ @@ -3011,8 +3028,14 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, - Event::Unbonded { member: 550, pool_id: 1, points: 550, balance: 550 }, - Event::Unbonded { member: 40, pool_id: 1, points: 40, balance: 40 }, + Event::Unbonded { + member: 550, + pool_id: 1, + points: 550, + balance: 550, + era: 3 + }, + Event::Unbonded { member: 40, pool_id: 1, points: 40, balance: 40, era: 3 }, ] ); assert_eq!( @@ -3077,7 +3100,7 @@ mod withdraw_unbonded { assert_eq!( pool_events_since_last_call(), vec![ - Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5, era: 9 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 5, points: 5 }, Event::MemberRemoved { pool_id: 1, member: 10 }, Event::Destroyed { pool_id: 1 } @@ -3122,8 +3145,14 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 40, pool_id: 1, bonded: 40, joined: true }, Event::Bonded { member: 550, pool_id: 1, bonded: 550, joined: true }, - Event::Unbonded { member: 40, pool_id: 1, balance: 20, points: 20 }, - Event::Unbonded { member: 550, pool_id: 1, balance: 275, points: 275 }, + Event::Unbonded { member: 40, pool_id: 1, balance: 20, points: 20, era: 3 }, + Event::Unbonded { + member: 550, + pool_id: 1, + balance: 275, + points: 275, + era: 3, + } ] ); assert_eq!( @@ -3199,7 +3228,7 @@ mod withdraw_unbonded { assert_eq!( pool_events_since_last_call(), vec![ - Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5 }, + Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5, era: 6 }, Event::Withdrawn { member: 10, pool_id: 1, points: 5, balance: 5 }, Event::MemberRemoved { pool_id: 1, member: 10 }, Event::Destroyed { pool_id: 1 } @@ -3313,8 +3342,20 @@ mod withdraw_unbonded { Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, Event::Bonded { member: 200, pool_id: 1, bonded: 200, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, - Event::Unbonded { member: 200, pool_id: 1, points: 200, balance: 200 } + Event::Unbonded { + member: 100, + pool_id: 1, + points: 100, + balance: 100, + era: 3 + }, + Event::Unbonded { + member: 200, + pool_id: 1, + points: 200, + balance: 200, + era: 3 + } ] ); @@ -3391,7 +3432,7 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100 }, + Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100, era: 3 }, 
Event::Withdrawn { member: 100, pool_id: 1, points: 100, balance: 100 }, Event::MemberRemoved { pool_id: 1, member: 100 } ] @@ -3431,8 +3472,8 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false }, - Event::Unbonded { member: 10, pool_id: 1, points: 6, balance: 6 }, - Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1 } + Event::Unbonded { member: 10, pool_id: 1, points: 6, balance: 6, era: 3 }, + Event::Unbonded { member: 10, pool_id: 1, points: 1, balance: 1, era: 4 } ] ); @@ -3518,8 +3559,8 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 11, pool_id: 1, bonded: 10, joined: true }, - Event::Unbonded { member: 11, pool_id: 1, points: 6, balance: 6 }, - Event::Unbonded { member: 11, pool_id: 1, points: 1, balance: 1 } + Event::Unbonded { member: 11, pool_id: 1, points: 6, balance: 6, era: 3 }, + Event::Unbonded { member: 11, pool_id: 1, points: 1, balance: 1, era: 4 } ] ); @@ -3608,8 +3649,8 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, - Event::Unbonded { member: 100, pool_id: 1, points: 75, balance: 75 }, - Event::Unbonded { member: 100, pool_id: 1, points: 25, balance: 25 }, + Event::Unbonded { member: 100, pool_id: 1, points: 75, balance: 75, era: 3 }, + Event::Unbonded { member: 100, pool_id: 1, points: 25, balance: 25, era: 4 }, Event::Withdrawn { member: 100, pool_id: 1, points: 75, balance: 75 }, ] ); @@ -3680,9 +3721,9 @@ mod withdraw_unbonded { Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: false }, - Event::Unbonded { member: 10, pool_id: 1, balance: 7, points: 7 }, - Event::Unbonded { member: 10, pool_id: 1, balance: 3, points: 3 }, - Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 7, points: 7, era: 3 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 3, points: 3, era: 4 }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 7, points: 7 } ] ); diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index eed8e5fd390cd..5a7cd494362ca 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -95,8 +95,8 @@ fn pool_lifecycle_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10, era: 3 }, + PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10, era: 3 }, ] ); @@ -159,7 +159,7 @@ fn pool_lifecycle_e2e() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50 }] + vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50, era: 6 }] ); // waiting another bonding 
duration: @@ -237,8 +237,8 @@ fn pool_slash_e2e() { assert_eq!( pool_events_since_last_call(), vec![ - PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 } + PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 4 } ] ); @@ -262,9 +262,9 @@ fn pool_slash_e2e() { assert_eq!( pool_events_since_last_call(), vec![ - PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10 }, - PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10 }, + PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 5 }, + PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 5 }, + PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 5 }, ] ); @@ -305,7 +305,7 @@ fn pool_slash_e2e() { assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5 }] + vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }] ); // now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free. @@ -342,7 +342,7 @@ fn pool_slash_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, - PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10 } + PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 } ] ); @@ -432,7 +432,13 @@ fn pool_slash_proportional() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }] + vec![PoolsEvent::Unbonded { + member: 20, + pool_id: 1, + balance: bond, + points: bond, + era: 127 + }] ); CurrentEra::::set(Some(100)); @@ -443,7 +449,13 @@ fn pool_slash_proportional() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: bond, points: bond }] + vec![PoolsEvent::Unbonded { + member: 21, + pool_id: 1, + balance: bond, + points: bond, + era: 128 + }] ); CurrentEra::::set(Some(101)); @@ -454,7 +466,13 @@ fn pool_slash_proportional() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: bond, points: bond }] + vec![PoolsEvent::Unbonded { + member: 22, + pool_id: 1, + balance: bond, + points: bond, + era: 129 + }] ); // Apply a slash that happened in era 100. This is typically applied with a delay. @@ -531,7 +549,13 @@ fn pool_slash_non_proportional_only_bonded_pool() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }] + vec![PoolsEvent::Unbonded { + member: 20, + pool_id: 1, + balance: bond, + points: bond, + era: 127 + }] ); // slash for 30. This will be deducted only from the bonded pool. @@ -598,7 +622,13 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ); assert_eq!( pool_events_since_last_call(), - vec![PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: bond, points: bond }] + vec![PoolsEvent::Unbonded { + member: 20, + pool_id: 1, + balance: bond, + points: bond, + era: 127 + }] ); // slash 50. 
This will be deducted only from the bonded pool and one of the unbonding pools. From eefb4fad378181d8916c28f320a1ba383157c990 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 25 Jul 2022 16:20:19 +0100 Subject: [PATCH 065/135] remove FunctionOf (#11897) * remove FunctionOf * fix docs Co-authored-by: parity-processbot <> --- frame/support/src/weights.rs | 89 +++--------------------------------- 1 file changed, 6 insertions(+), 83 deletions(-) diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index dd49409ccc912..c37a72536bddf 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -91,27 +91,19 @@ //! # fn main() {} //! ``` //! -//! ### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. +//! ### 2. Define weights as a function of input arguments. //! -//! This struct works in a similar manner as above. 3 items must be provided and each can be either -//! a fixed value or a function/closure with the same parameters list as the dispatchable function -//! itself, wrapper in a tuple. -//! -//! Using this only makes sense if you want to use a function for at least one of the elements. If -//! all 3 are static values, providing a raw tuple is easier. +//! The arguments of the dispatch are available in the weight expressions as a borrowed value. //! //! ``` //! # use frame_system::Config; -//! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; +//! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = FunctionOf( -//! // weight, function. -//! |args: (&u32, &u64)| *args.0 as u64 + args.1, -//! // class, fixed. +//! #[weight = ( +//! *a as u64 + *b, //! DispatchClass::Operational, -//! // pays fee, function. -//! |args: (&u32, &u64)| if *args.0 > 1000 { Pays::Yes } else { Pays::No }, +//! if *a > 1000 { Pays::Yes } else { Pays::No } //! )] //! fn dispatching(origin, a: u32, b: u64) { unimplemented!() } //! } @@ -508,75 +500,6 @@ impl PaysFee for (Weight, Pays) { } } -/// A struct to represent a weight which is a function of the input arguments. The given items have -/// the following types: -/// -/// - `WD`: a raw `Weight` value or a closure that returns a `Weight` with the same argument list as -/// the dispatched, wrapped in a tuple. -/// - `CD`: a raw `DispatchClass` value or a closure that returns a `DispatchClass` with the same -/// argument list as the dispatched, wrapped in a tuple. -/// - `PF`: a `Pays` variant for whether this dispatch pays fee or not or a closure that returns a -/// `Pays` variant with the same argument list as the dispatched, wrapped in a tuple. 
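The migration this commit asks of downstream code is mechanical. For contrast, a sketch of the same dispatchable before and after, with both forms lifted from the doc comment rewritten above (shown as it would sit inside a `decl_module!` block, so not standalone-runnable):

```rust
// Before (now removed): weight, class and pays-fee supplied via the
// `FunctionOf` tuple struct, with closures over the argument tuple.
#[weight = FunctionOf(
    |args: (&u32, &u64)| *args.0 as u64 + args.1,
    DispatchClass::Operational,
    |args: (&u32, &u64)| if *args.0 > 1000 { Pays::Yes } else { Pays::No },
)]
fn dispatching(origin, a: u32, b: u64) { unimplemented!() }

// After: a plain tuple; the dispatchable's arguments are borrowed
// directly inside the annotation.
#[weight = (
    *a as u64 + *b,
    DispatchClass::Operational,
    if *a > 1000 { Pays::Yes } else { Pays::No },
)]
fn dispatching(origin, a: u32, b: u64) { unimplemented!() }
```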
-#[deprecated = "Function arguments are available directly inside the annotation now."] -pub struct FunctionOf(pub WD, pub CD, pub PF); - -// `WeighData` as a raw value -#[allow(deprecated)] -impl WeighData for FunctionOf { - fn weigh_data(&self, _: Args) -> Weight { - self.0 - } -} - -// `WeighData` as a closure -#[allow(deprecated)] -impl WeighData for FunctionOf -where - WD: Fn(Args) -> Weight, -{ - fn weigh_data(&self, args: Args) -> Weight { - (self.0)(args) - } -} - -// `ClassifyDispatch` as a raw value -#[allow(deprecated)] -impl ClassifyDispatch for FunctionOf { - fn classify_dispatch(&self, _: Args) -> DispatchClass { - self.1 - } -} - -// `ClassifyDispatch` as a raw value -#[allow(deprecated)] -impl ClassifyDispatch for FunctionOf -where - CD: Fn(Args) -> DispatchClass, -{ - fn classify_dispatch(&self, args: Args) -> DispatchClass { - (self.1)(args) - } -} - -// `PaysFee` as a raw value -#[allow(deprecated)] -impl PaysFee for FunctionOf { - fn pays_fee(&self, _: Args) -> Pays { - self.2 - } -} - -// `PaysFee` as a closure -#[allow(deprecated)] -impl PaysFee for FunctionOf -where - PF: Fn(Args) -> Pays, -{ - fn pays_fee(&self, args: Args) -> Pays { - (self.2)(args) - } -} - /// Implementation for unchecked extrinsic. impl GetDispatchInfo for UncheckedExtrinsic From 720093428b90363cb909162ef7c05e5bd51ae626 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 25 Jul 2022 17:48:01 +0200 Subject: [PATCH 066/135] contracts: Allow `ChainExtension::call()` to access `&mut self` (#11874) * Give chain extensions the ability to store some temporary values * Update frame/contracts/src/wasm/runtime.rs Co-authored-by: Hernando Castano * Rename func_id -> id * Replace `id` param by two functions on `env` Co-authored-by: Hernando Castano --- frame/contracts/fixtures/chain_extension.wat | 4 +- .../fixtures/chain_extension_temp_storage.wat | 85 ++++++++++ frame/contracts/src/chain_extension.rs | 41 +++-- frame/contracts/src/lib.rs | 2 +- frame/contracts/src/tests.rs | 150 +++++++++++++----- frame/contracts/src/wasm/runtime.rs | 28 +++- 6 files changed, 254 insertions(+), 56 deletions(-) create mode 100644 frame/contracts/fixtures/chain_extension_temp_storage.wat diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat index 9b2534c540ab8..7cc7335052e90 100644 --- a/frame/contracts/fixtures/chain_extension.wat +++ b/frame/contracts/fixtures/chain_extension.wat @@ -31,14 +31,14 @@ ;; the chain extension passes through the input and returns it as output (call $seal_call_chain_extension - (i32.load (i32.const 4)) ;; func_id + (i32.load (i32.const 4)) ;; id (i32.const 4) ;; input_ptr (i32.load (i32.const 0)) ;; input_len (i32.const 16) ;; output_ptr (i32.const 12) ;; output_len_ptr ) - ;; the chain extension passes through the func_id + ;; the chain extension passes through the id (call $assert (i32.eq (i32.load (i32.const 4)))) (call $seal_return (i32.const 0) (i32.const 16) (i32.load (i32.const 12))) diff --git a/frame/contracts/fixtures/chain_extension_temp_storage.wat b/frame/contracts/fixtures/chain_extension_temp_storage.wat new file mode 100644 index 0000000000000..b481abb5bc7c9 --- /dev/null +++ b/frame/contracts/fixtures/chain_extension_temp_storage.wat @@ -0,0 +1,85 @@ +;; Call chain extension two times with the specified func_ids +;; It then calls itself once +(module + (import "seal0" "seal_call_chain_extension" + (func $seal_call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) + ) + (import "seal0" 
"seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_address" (func $seal_address (param i32 i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 16 16)) + + (func $assert (param i32) + (block $ok + (br_if $ok (get_local 0)) + (unreachable) + ) + ) + + ;; [0, 4) len of input buffer: 8 byte (func_ids) + 1byte (stop_recurse) + (data (i32.const 0) "\09") + + ;; [4, 16) buffer for input + + ;; [16, 48] buffer for self address + + ;; [48, 52] len of self address buffer + (data (i32.const 48) "\20") + + (func (export "deploy")) + + (func (export "call") + ;; input: (func_id1: i32, func_id2: i32, stop_recurse: i8) + (call $seal_input (i32.const 4) (i32.const 0)) + + (call $seal_call_chain_extension + (i32.load (i32.const 4)) ;; id + (i32.const 0) ;; input_ptr + (i32.const 0) ;; input_len + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; output_len_ptr + ) + drop + + (call $seal_call_chain_extension + (i32.load (i32.const 8)) ;; _id + (i32.const 0) ;; input_ptr + (i32.const 0) ;; input_len + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; output_len_ptr + ) + drop + + (if (i32.eqz (i32.load8_u (i32.const 12))) + (then + ;; stop recursion + (i32.store8 (i32.const 12) (i32.const 1)) + + ;; load own address into buffer + (call $seal_address (i32.const 16) (i32.const 48)) + + ;; call function 2 + 3 of chainext 3 next time + ;; (3 << 16) | 2 + ;; (3 << 16) | 3 + (i32.store (i32.const 4) (i32.const 196610)) + (i32.store (i32.const 8) (i32.const 196611)) + + ;; call self + (call $seal_call + (i32.const 8) ;; Set ALLOW_REENTRY + (i32.const 16) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 512) ;; Pointer to the buffer with value to transfer + (i32.const 4) ;; Pointer to input data buffer address + (i32.load (i32.const 0)) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + + ;; check that call succeeded of call + (call $assert (i32.eqz)) + ) + (else) + ) + ) +) diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 536d58c94f68f..23242a2a542c1 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -33,7 +33,7 @@ //! //! Often there is a need for having multiple chain extensions. This is often the case when //! some generally useful off-the-shelf extensions should be included. To have multiple chain -//! extensions they can be put into a tuple which is then passed to `[Config::ChainExtension]` like +//! extensions they can be put into a tuple which is then passed to [`Config::ChainExtension`] like //! this `type Extensions = (ExtensionA, ExtensionB)`. //! //! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple. @@ -94,6 +94,12 @@ pub type Result = sp_std::result::Result; /// In order to create a custom chain extension this trait must be implemented and supplied /// to the pallet contracts configuration trait as the associated type of the same name. /// Consult the [module documentation](self) for a general explanation of chain extensions. +/// +/// # Lifetime +/// +/// The extension will be [`Default`] initialized at the beginning of each call +/// (**not** per call stack) and dropped afterwards. 
Hence any value held inside the extension +/// can be used as a per-call scratch buffer. pub trait ChainExtension { /// Call the chain extension logic. /// @@ -102,8 +108,6 @@ pub trait ChainExtension { /// imported wasm function. /// /// # Parameters - /// - `func_id`: The first argument to `seal_call_chain_extension`. Usually used to determine - /// which function to realize. /// - `env`: Access to the remaining arguments and the execution environment. /// /// # Return @@ -111,7 +115,7 @@ pub trait ChainExtension { /// In case of `Err` the contract execution is immediately suspended and the passed error /// is returned to the caller. Otherwise the value of [`RetVal`] determines the exit /// behaviour. - fn call(func_id: u32, env: Environment) -> Result + fn call(&mut self, env: Environment) -> Result where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>; @@ -132,7 +136,7 @@ pub trait ChainExtension { /// /// An extension that implements this trait can be put in a tuple in order to have multiple /// extensions available. The tuple implementation routes requests based on the first two -/// most significant bytes of the `func_id` passed to `call`. +/// most significant bytes of the `id` passed to `call`. /// /// If this extensions is to be used by multiple runtimes consider /// [registering it](https://github.com/paritytech/chainextension-registry) to ensure that there @@ -150,15 +154,15 @@ pub trait RegisteredChainExtension: ChainExtension { #[impl_trait_for_tuples::impl_for_tuples(10)] #[tuple_types_custom_trait_bound(RegisteredChainExtension)] impl ChainExtension for Tuple { - fn call(func_id: u32, mut env: Environment) -> Result + fn call(&mut self, mut env: Environment) -> Result where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { for_tuples!( #( - if (Tuple::ID == (func_id >> 16) as u16) && Tuple::enabled() { - return Tuple::call(func_id, env); + if (Tuple::ID == env.ext_id()) && Tuple::enabled() { + return Tuple.call(env); } )* ); @@ -206,6 +210,22 @@ impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { + /// The function id within the `id` passed by a contract. + /// + /// It returns the two least significant bytes of the `id` passed by a contract as the other + /// two bytes represent the chain extension itself (the code which is calling this function). + pub fn func_id(&self) -> u16 { + (self.inner.id & 0x00FF) as u16 + } + + /// The chain extension id within the `id` passed by a contract. + /// + /// It returns the two most significant bytes of the `id` passed by a contract which represent + /// the chain extension itself (the code which is calling this function). + pub fn ext_id(&self) -> u16 { + (self.inner.id >> 16) as u16 + } + /// Charge the passed `amount` of weight from the overall limit. /// /// It returns `Ok` when there the remaining weight budget is larger than the passed @@ -251,13 +271,14 @@ impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { /// ever create this type. Chain extensions merely consume it. 
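Both halves of the packed identifier are recovered by the accessors introduced above. A self-contained sketch of the layout that `ext_id()`, `func_id()` and the tuple router all rely on, using the full low half-word for the function id as the doc comments describe:

```rust
/// Sketch of the 32-bit id passed to `seal_call_chain_extension`:
/// the high half-word selects the registered extension, the low
/// half-word selects the function within it.
fn split_id(id: u32) -> (u16, u16) {
    ((id >> 16) as u16, (id & 0x0000_FFFF) as u16)
}

fn join_id(ext_id: u16, func_id: u16) -> u32 {
    (ext_id as u32) << 16 | func_id as u32
}

#[test]
fn id_round_trips() {
    // Matches the wat fixture above: (3 << 16) | 2 == 196610.
    assert_eq!(join_id(3, 2), 196610);
    assert_eq!(split_id(196610), (3, 2));
}
```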
pub(crate) fn new( runtime: &'a mut Runtime<'b, E>, + id: u32, input_ptr: u32, input_len: u32, output_ptr: u32, output_len_ptr: u32, ) -> Self { Environment { - inner: Inner { runtime, input_ptr, input_len, output_ptr, output_len_ptr }, + inner: Inner { runtime, id, input_ptr, input_len, output_ptr, output_len_ptr }, phantom: PhantomData, } } @@ -406,6 +427,8 @@ struct Inner<'a, 'b, E: Ext> { /// The runtime contains all necessary functions to interact with the running contract. runtime: &'a mut Runtime<'b, E>, /// Verbatim argument passed to `seal_call_chain_extension`. + id: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. input_ptr: u32, /// Verbatim argument passed to `seal_call_chain_extension`. input_len: u32, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 60b30ffa25005..319bacaab7789 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -280,7 +280,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// Type that allows the runtime authors to add new host functions for a contract to call. - type ChainExtension: chain_extension::ChainExtension; + type ChainExtension: chain_extension::ChainExtension + Default; /// Cost schedule and limits. #[pallet::constant] diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 85a0e9977d2d7..0febfec929b6e 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -118,10 +118,17 @@ pub struct TestExtension { last_seen_inputs: (u32, u32, u32, u32), } +#[derive(Default)] pub struct RevertingExtension; +#[derive(Default)] pub struct DisabledExtension; +#[derive(Default)] +pub struct TempStorageExtension { + storage: u32, +} + impl TestExtension { fn disable() { TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) @@ -143,18 +150,20 @@ impl Default for TestExtension { } impl ChainExtension for TestExtension { - fn call(func_id: u32, env: Environment) -> ExtensionResult + fn call(&mut self, env: Environment) -> ExtensionResult where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { + let func_id = env.func_id(); + let id = env.ext_id() as u32 | func_id as u32; match func_id { 0 => { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, 1 => { let env = env.only_in(); @@ -162,17 +171,17 @@ impl ChainExtension for TestExtension { e.borrow_mut().last_seen_inputs = (env.val0(), env.val1(), env.val2(), env.val3()) }); - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, 2 => { let mut env = env.buf_in_buf_out(); let weight = env.read(5)?[4].into(); env.charge_weight(weight)?; - Ok(RetVal::Converging(func_id)) + Ok(RetVal::Converging(id)) }, 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { - panic!("Passed unknown func_id to test chain extension: {}", func_id); + panic!("Passed unknown id to test chain extension: {}", func_id); }, } } @@ -187,7 +196,7 @@ impl RegisteredChainExtension for TestExtension { } impl ChainExtension for RevertingExtension { - fn call(_func_id: u32, _env: Environment) -> ExtensionResult + fn call(&mut self, _env: Environment) -> ExtensionResult where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, @@ -205,7 +214,7 @@ impl RegisteredChainExtension for RevertingExtension { } impl ChainExtension for DisabledExtension { - fn call(_func_id: u32, _env: Environment) -> ExtensionResult + fn 
call(&mut self, _env: Environment) -> ExtensionResult where E: Ext, ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, @@ -222,6 +231,37 @@ impl RegisteredChainExtension for DisabledExtension { const ID: u16 = 2; } +impl ChainExtension for TempStorageExtension { + fn call(&mut self, env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + let func_id = env.func_id(); + match func_id { + 0 => self.storage = 42, + 1 => assert_eq!(self.storage, 42, "Storage is preserved inside the same call."), + 2 => { + assert_eq!(self.storage, 0, "Storage is different for different calls."); + self.storage = 99; + }, + 3 => assert_eq!(self.storage, 99, "Storage is preserved inside the same call."), + _ => { + panic!("Passed unknown id to test chain extension: {}", func_id); + }, + } + Ok(RetVal::Converging(0)) + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } +} + +impl RegisteredChainExtension for TempStorageExtension { + const ID: u16 = 3; +} + parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); @@ -325,7 +365,8 @@ impl Config for Test { type CallStack = [Frame; 31]; type WeightPrice = Self; type WeightInfo = (); - type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension); + type ChainExtension = + (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DeletionQueueDepth = ConstU32<1024>; type DeletionWeightLimit = ConstU64<500_000_000_000>; type Schedule = MySchedule; @@ -396,6 +437,29 @@ fn initialize_block(number: u64) { System::initialize(&number, &[0u8; 32].into(), &Default::default()); } +struct ExtensionInput<'a> { + extension_id: u16, + func_id: u16, + extra: &'a [u8], +} + +impl<'a> ExtensionInput<'a> { + fn to_vec(&self) -> Vec { + ((self.extension_id as u32) << 16 | (self.func_id as u32)) + .to_le_bytes() + .iter() + .chain(self.extra) + .cloned() + .collect() + } +} + +impl<'a> From> for Vec { + fn from(input: ExtensionInput) -> Vec { + input.to_vec() + } +} + // Perform a call to a plain account. // The actual transfer fails because we can only call contracts. // Then we check that at least the base costs where charged (no runtime gas costs.) @@ -1567,23 +1631,6 @@ fn disabled_chain_extension_errors_on_call() { #[test] fn chain_extension_works() { - struct Input<'a> { - extension_id: u16, - func_id: u16, - extra: &'a [u8], - } - - impl<'a> From> for Vec { - fn from(input: Input) -> Vec { - ((input.extension_id as u32) << 16 | (input.func_id as u32)) - .to_le_bytes() - .iter() - .chain(input.extra) - .cloned() - .collect() - } - } - let (code, hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); @@ -1599,12 +1646,8 @@ fn chain_extension_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - // The contract takes a up to 2 byte buffer where the first byte passed is used as - // as func_id to the chain extension which behaves differently based on the - // func_id. 
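The `ExtensionInput` helper above pins down the wire layout that the fixtures decode: a little-endian packed id followed by the extra bytes. A quick check of that encoding, using only what this diff defines (it would live alongside the other tests in this module):

```rust
#[test]
fn extension_input_layout() {
    // [ (extension_id << 16 | func_id) as u32, little-endian ][ extra... ]
    let input: Vec<u8> =
        ExtensionInput { extension_id: 3, func_id: 2, extra: &[42] }.into();
    assert_eq!(input[..4], ((3u32 << 16) | 2).to_le_bytes());
    assert_eq!(input[4..], [42]);
}
```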
- // 0 = read input buffer and pass it through as output - let input: Vec = Input { extension_id: 0, func_id: 0, extra: &[99] }.into(); + let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); @@ -1617,7 +1660,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 0, func_id: 1, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), false, ) .result @@ -1632,7 +1675,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 0, func_id: 2, extra: &[0] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), false, ); assert_ok!(result.result); @@ -1643,7 +1686,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 0, func_id: 2, extra: &[42] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), false, ); assert_ok!(result.result); @@ -1654,7 +1697,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 0, func_id: 2, extra: &[95] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), false, ); assert_ok!(result.result); @@ -1667,7 +1710,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 0, func_id: 3, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), false, ) .result @@ -1684,7 +1727,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 1, func_id: 0, extra: &[] }.into(), + ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), false, ) .result @@ -1701,13 +1744,46 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - Input { extension_id: 2, func_id: 0, extra: &[] }.into(), + ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into(), ), Error::::NoChainExtension, ); }); } +#[test] +fn chain_extension_temp_storage_works() { + let (code, hash) = compile_module::("chain_extension_temp_storage").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + min_balance * 100, + GAS_LIMIT, + None, + code, + vec![], + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + + // Call func 0 and func 1 back to back. + let stop_recursion = 0u8; + let mut input: Vec = ExtensionInput { extension_id: 3, func_id: 0, extra: &[] }.into(); + input.extend_from_slice( + ExtensionInput { extension_id: 3, func_id: 1, extra: &[stop_recursion] } + .to_vec() + .as_ref(), + ); + + assert_ok!( + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false) + .result + ); + }) +} + #[test] fn lazy_removal_works() { let (code, hash) = compile_module::("self_destruct").unwrap(); diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index c1757ba06412a..3d5e62f2c1333 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -65,7 +65,7 @@ impl KeyType { /// This enum can be extended in the future: New codes can be added but existing codes /// will not be changed or removed. This means that any contract **must not** exhaustively /// match return codes. 
Instead, contracts should prepare for unknown variants and deal with -/// those errors gracefuly in order to be forward compatible. +/// those errors gracefully in order to be forward compatible. #[repr(u32)] pub enum ReturnCode { /// API call successful. @@ -101,8 +101,9 @@ pub enum ReturnCode { } impl ConvertibleToWasm for ReturnCode { - type NativeType = Self; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = Self; + fn to_typed_value(self) -> sp_sandbox::Value { sp_sandbox::Value::I32(self as i32) } @@ -439,6 +440,7 @@ pub struct Runtime<'a, E: Ext + 'a> { input_data: Option>, memory: sp_sandbox::default_executor::Memory, trap_reason: Option, + chain_extension: Option::ChainExtension>>, } impl<'a, E> Runtime<'a, E> @@ -452,7 +454,13 @@ where input_data: Vec, memory: sp_sandbox::default_executor::Memory, ) -> Self { - Runtime { ext, input_data: Some(input_data), memory, trap_reason: None } + Runtime { + ext, + input_data: Some(input_data), + memory, + trap_reason: None, + chain_extension: Some(Box::new(Default::default())), + } } /// Converts the sandbox result and the runtime state into the execution outcome. @@ -2006,7 +2014,7 @@ define_env!(Env, , // module error. [seal0] seal_call_chain_extension( ctx, - func_id: u32, + id: u32, input_ptr: u32, input_len: u32, output_ptr: u32, @@ -2016,14 +2024,20 @@ define_env!(Env, , if !::ChainExtension::enabled() { return Err(Error::::NoChainExtension.into()); } - let env = Environment::new(ctx, input_ptr, input_len, output_ptr, output_len_ptr); - match ::ChainExtension::call(func_id, env)? { + let mut chain_extension = ctx.chain_extension.take().expect( + "Constructor initializes with `Some`. This is the only place where it is set to `None`.\ + It is always reset to `Some` afterwards. qed" + ); + let env = Environment::new(ctx, id, input_ptr, input_len, output_ptr, output_len_ptr); + let ret = match chain_extension.call(env)? { RetVal::Converging(val) => Ok(val), RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData { flags: flags.bits(), data, })), - } + }; + ctx.chain_extension = Some(chain_extension); + ret }, // Emit a custom debug message. 
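The `seal_call_chain_extension` change above has to juggle ownership: `call(&mut self, env)` needs a mutable borrow of the extension while the `Environment` holds a mutable borrow of the runtime, so the boxed extension is moved out of `ctx` for the duration of the call and restored afterwards. A reduced model of that pattern with hypothetical stand-in types (`Runtime` and `Ext` here are illustrations, not the pallet's types):

```rust
struct Ext {
    storage: u32, // per-call scratch state, as exercised by `TempStorageExtension`
}

struct Runtime {
    chain_extension: Option<Box<Ext>>,
}

impl Ext {
    fn call(&mut self, _rt: &mut Runtime) -> u32 {
        self.storage += 1;
        self.storage
    }
}

fn host_call(rt: &mut Runtime) -> u32 {
    // Move the extension out so the two mutable borrows are disjoint,
    // then put it back before returning.
    let mut ext = rt
        .chain_extension
        .take()
        .expect("initialized with Some and always restored afterwards; qed");
    let ret = ext.call(rt);
    rt.chain_extension = Some(ext);
    ret
}
```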
From 9b139a5117081dc1d70890c02e5a09a46bc95fb6 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Tue, 26 Jul 2022 00:52:11 +0800 Subject: [PATCH 067/135] Pruned duplicated dependencies (#11900) * Update comfy-table v5.0.1 => v6.0.0 Signed-off-by: koushiro * Update strum v0.23.0 => v0.24.1 Signed-off-by: koushiro * Update h2 v0.3.9 => v0.3.13 Signed-off-by: koushiro * Update file-per-thread-logger v0.1.4 => v0.1.5 Signed-off-by: koushiro * Update mio v0.8.0 => v0.8.4 Signed-off-by: koushiro * revert twox-hash Signed-off-by: koushiro * Update secp256k1 v0.21.2 => v0.24.0 Signed-off-by: koushiro --- Cargo.lock | 324 +++--------------- client/beefy/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 9 +- primitives/core/Cargo.toml | 2 +- primitives/core/src/ecdsa.rs | 2 +- primitives/io/Cargo.toml | 2 +- primitives/keyring/Cargo.toml | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- utils/wasm-builder/Cargo.toml | 2 +- 9 files changed, 65 insertions(+), 282 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d5bfe5e51201..8c19c80bde1a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,7 +230,7 @@ dependencies = [ "parking", "polling", "slab", - "socket2 0.4.4", + "socket2", "waker-fn", "winapi", ] @@ -310,7 +310,7 @@ dependencies = [ "futures-io", "futures-util", "pin-utils", - "socket2 0.4.4", + "socket2", "trust-dns-resolver", ] @@ -382,12 +382,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - [[package]] name = "autocfg" version = "1.0.1" @@ -512,7 +506,7 @@ name = "beefy-merkle-tree" version = "4.0.0-dev" dependencies = [ "beefy-primitives", - "env_logger 0.9.0", + "env_logger", "hex", "hex-literal", "log", @@ -1005,7 +999,7 @@ version = "3.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -1021,15 +1015,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - [[package]] name = "cmake" version = "0.1.46" @@ -1041,9 +1026,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "5.0.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b103d85ca6e209388771bfb7aa6b68a7aeec4afbf6f0a0264bfbf50360e5212e" +checksum = "121d8a5b0346092c18a4b2fd6f620d7a06f0eb7ac0a45860939a0884bc579c56" dependencies = [ "strum", "strum_macros", @@ -1831,7 +1816,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro2", "quote", "syn", @@ -1898,19 +1883,6 @@ dependencies = [ "syn", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.9.0" @@ -1918,7 +1890,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ "atty", - "humantime 2.1.0", + "humantime", "log", "regex", "termcolor", @@ -2008,11 +1980,11 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +checksum = "21e16290574b39ee41c71aeb90ae960c504ebaf1e2a1c87bd52aa56ed6e1a02f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", ] @@ -2445,12 +2417,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "2.0.0" @@ -2751,9 +2717,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.9" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -2764,7 +2730,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.6.7", + "tokio-util", "tracing", ] @@ -2821,15 +2787,6 @@ dependencies = [ "ahash", ] -[[package]] -name = "heck" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -2950,15 +2907,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - [[package]] name = "humantime" version = "2.1.0" @@ -2982,7 +2930,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite 0.2.6", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -3084,7 +3032,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ - "autocfg 1.0.1", + "autocfg", "hashbrown 0.11.2", "serde", ] @@ -3131,7 +3079,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.4.4", + "socket2", "widestring", "winapi", "winreg", @@ -3213,7 +3161,7 @@ dependencies = [ "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.1", + "tokio-util", "tracing", "webpki-roots", ] @@ -3316,7 +3264,7 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.1", + "tokio-util", "tracing", ] @@ -3695,7 +3643,7 @@ dependencies = [ "log", "rand 0.8.4", "smallvec", - "socket2 0.4.4", + "socket2", "void", ] @@ -3913,7 +3861,7 @@ dependencies = [ "libc", "libp2p-core", "log", - "socket2 0.4.4", + "socket2", ] [[package]] @@ -4276,7 +4224,7 @@ version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ - "autocfg 1.0.1", + "autocfg", ] [[package]] @@ -4321,30 +4269,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", - "autocfg 1.0.1", + "autocfg", ] [[package]] name = "mio" -version = "0.8.0" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2 0.3.19", - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.36.1", ] [[package]] @@ -5015,22 +4952,13 @@ dependencies = [ "version_check", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-integer", "num-traits", ] @@ -5060,7 +4988,7 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-traits", ] @@ -5070,7 +4998,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-bigint", "num-integer", "num-traits", @@ -5082,7 +5010,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ - "autocfg 1.0.1", + "autocfg", "num-integer", "num-traits", ] @@ -5093,7 +5021,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.1", + "autocfg", "libm", ] @@ -5500,7 +5428,7 @@ version = "4.0.0-dev" dependencies = [ "assert_matches", "bitflags", - "env_logger 0.9.0", + "env_logger", "frame-benchmarking", "frame-support", "frame-system", @@ -5866,7 +5794,7 @@ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ "ckb-merkle-mountain-range", - "env_logger 0.9.0", + "env_logger", "frame-benchmarking", "frame-support", "frame-system", @@ -7082,7 +7010,7 @@ dependencies = [ "bytes", "cfg-if 1.0.0", "cmake", - "heck 0.4.0", + "heck", "itertools", "lazy_static", "log", @@ -7207,25 +7135,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg 0.1.2", - "rand_xorshift", - "winapi", -] - [[package]] name = "rand" version = "0.7.3" @@ -7252,16 +7161,6 @@ dependencies = [ "rand_hc 0.3.0", ] -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", -] - [[package]] name = "rand_chacha" version = "0.2.2" @@ -7282,21 +7181,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" @@ -7325,15 +7209,6 @@ dependencies = [ "rand 0.8.4", ] -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rand_hc" version = "0.2.0" @@ -7352,50 +7227,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - [[package]] name = "rand_pcg" version = "0.2.1" @@ -7414,15 +7245,6 @@ dependencies = [ "rand_core 0.6.2", ] -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -7435,7 +7257,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ - "autocfg 1.0.1", + "autocfg", "crossbeam-deque", "either", "rayon-core", @@ -7454,15 +7276,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "redox_syscall" version = "0.2.10" @@ -7580,7 +7393,7 @@ dependencies = [ name = "remote-externalities" version = "0.10.0-dev" dependencies = [ - "env_logger 0.9.0", + "env_logger", "frame-support", "jsonrpsee", "log", @@ -8317,7 +8130,7 @@ name = "sc-executor" version = "0.10.0-dev" dependencies = [ "criterion", - "env_logger 0.9.0", + "env_logger", "hex-literal", "lazy_static", "lru", @@ -8747,7 +8560,7 @@ name = "sc-rpc" version = "4.0.0-dev" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger", "futures", "hash-db", "jsonrpsee", @@ -9189,19 +9002,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.21.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7883017d5b21f011ef8040ea9c6c7ac90834c0df26a69e4c0b06276151f125" +checksum = "b7649a0b3ffb32636e60c7ce0d70511eda9c52c658cd0634e194d5a19943aeff" dependencies = [ - "rand 0.6.5", "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.4.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" +checksum = "7058dc8eaf3f2810d7828680320acda0b25a288f6d288e19278e249bbf74226b" dependencies = [ "cc", ] @@ -9510,17 +9322,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.4.4" @@ -10427,20 +10228,20 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae14b91c7d11c9a851d3fbc80a963198998c2a64eec840477fa92d8ce9b70bb" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.23.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb0dc7ee9c15cea6199cde9a127fa16a4c5819af85395457ad72d68edc85a38" +checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ - "heck 0.3.2", + "heck", "proc-macro2", "quote", "rustversion", @@ -10947,7 +10748,7 @@ dependencies = [ "parking_lot 0.12.0", "pin-project-lite 0.2.6", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -10998,20 +10799,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-util" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.6", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.1" @@ -11024,6 +10811,7 @@ dependencies = [ "futures-sink", "pin-project-lite 0.2.6", "tokio", + "tracing", ] [[package]] @@ -11347,12 +11135,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" - [[package]] name = "unicode-width" version = "0.1.8" @@ -11493,6 +11275,12 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.77" diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index bd25496f2dfea..b910f7ce307a5 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -39,7 +39,7 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] serde = "1.0.136" -strum = { version = "0.23", features = ["derive"] } +strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" tokio = "1.17.0" sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 6c8e537a72c64..bbb0adf02e366 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -36,13 +36,8 @@ frame-election-provider-support = { version = "4.0.0-dev", default-features = fa # Optional imports for benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } pallet-election-provider-support-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support/benchmarking", optional = true } -rand = { version = "0.7.3", default-features = false, optional = true, features = [ - "alloc", - "small_rng", -] } -strum = { optional = true, default-features = false, version = "0.23.0", features = [ - "derive", -] } +rand = { version = "0.7.3", default-features = false, features = ["alloc", "small_rng"], optional = true } +strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] parking_lot = "0.12.0" diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index cdc12c677e4f3..fced678293140 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -56,7 +56,7 @@ schnorrkel = { version = "0.9.1", features = [ hex = { version = "0.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -secp256k1 = { version = "0.21.2", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } ss58-registry = { version = "1.18.0", default-features = false } sp-core-hashing = { version = "4.0.0", path = "./hashing", default-features = false, optional = true } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 485e39d3a71db..d56f65fd289e7 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -499,7 +499,7 @@ impl TraitPair for Pair { impl Pair { /// Get the seed for this key. 
pub fn seed(&self) -> Seed {
- self.secret.serialize_secret()
+ self.secret.secret_bytes()
 }

 /// Exactly as `from_string` except that if no matches are found then, the first 32
diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml
index 03cd8535665ac..77ba4be677f31 100644
--- a/primitives/io/Cargo.toml
+++ b/primitives/io/Cargo.toml
@@ -30,7 +30,7 @@ sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing"
 log = { version = "0.4.17", optional = true }
 futures = { version = "0.3.21", features = ["thread-pool"], optional = true }
 parking_lot = { version = "0.12.0", optional = true }
-secp256k1 = { version = "0.21.2", features = ["recovery", "global-context"], optional = true }
+secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true }
 tracing = { version = "0.1.29", default-features = false }
 tracing-core = { version = "0.1.28", default-features = false}
diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml
index 8f1adb6cf81f3..982abfd09a553 100644
--- a/primitives/keyring/Cargo.toml
+++ b/primitives/keyring/Cargo.toml
@@ -15,6 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 lazy_static = "1.4.0"
-strum = { version = "0.23.0", features = ["derive"] }
+strum = { version = "0.24.1", features = ["derive"] }
 sp-core = { version = "6.0.0", path = "../core" }
 sp-runtime = { version = "6.0.0", path = "../runtime" }
diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml
index ea26bf0c9261c..31b8f1332a653 100644
--- a/utils/frame/benchmarking-cli/Cargo.toml
+++ b/utils/frame/benchmarking-cli/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 chrono = "0.4"
 clap = { version = "3.1.18", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-comfy-table = { version = "5.0.1", default-features = false }
+comfy-table = { version = "6.0.0", default-features = false }
 handlebars = "4.2.2"
 hash-db = "0.15.2"
 hex = "0.4.3"
diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml
index 8f887e45ec176..ac0ba5dbdb85a 100644
--- a/utils/wasm-builder/Cargo.toml
+++ b/utils/wasm-builder/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 ansi_term = "0.12.1"
 build-helper = "0.1.1"
 cargo_metadata = "0.14.2"
-strum = { version = "0.23.0", features = ["derive"] }
+strum = { version = "0.24.1", features = ["derive"] }
 tempfile = "3.1.0"
 toml = "0.5.4"
 walkdir = "2.3.2"

From 134985fdf880e16de6ba5e8bf2bf9a7d8b768fb3 Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 25 Jul 2022 23:09:50 +0100 Subject: [PATCH 068/135] Fix typos (#11914)

---
 frame/transaction-payment/src/lib.rs | 2 +-
 test-utils/runtime/src/lib.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs
index 0f5c0321130be..fe37acb214452 100644
--- a/frame/transaction-payment/src/lib.rs
+++ b/frame/transaction-payment/src/lib.rs
@@ -609,7 +609,7 @@ where
 /// and user-included tip.
 ///
 /// The priority is based on the amount of `tip` the user is willing to pay per unit of either
- /// `weight` or `length`, depending which one is more limitting. For `Operational` extrinsics
+ /// `weight` or `length`, depending which one is more limiting. For `Operational` extrinsics
 /// we add a "virtual tip" to the calculations. 
/// /// The formula should simply be `tip / bounded_{weight|length}`, but since we are using diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index ea62f2ac84f3d..e5cfae49da56d 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -237,7 +237,7 @@ impl sp_runtime::traits::Dispatchable for Extrinsic { type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - panic!("This implemention should not be used for actual dispatch."); + panic!("This implementation should not be used for actual dispatch."); } } From 5709dfa7095659d398323fb2c9515f66b2617b17 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 26 Jul 2022 11:06:48 +0200 Subject: [PATCH 069/135] Rpc for pending rewards (#11831) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * rpc pending rewards * commit * remove unused imports * fix * fix * fmt * fix * fmt * fix * docs * docs & formatting * better formatting * temporary fix * error handling * fix? * fmt * use to_string * fmt * fixed error handling * fix * rpc added to client * Update Cargo.toml * Update Cargo.toml * fix wrong reward counter * expose function * move implementation * docs * docs * docs * Update lib.rs * Update lib.rs * unexpose functions * unused dependency * update Cargo.lock * Update frame/nomination-pools/src/lib.rs * Update lib.rs * Update lib.rs * Update frame/nomination-pools/rpc/runtime-api/src/lib.rs Co-authored-by: Bastian Köcher * remove rpc * remove rpc directory * final fix Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 10 ++++++ Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 2 ++ bin/node/runtime/src/lib.rs | 6 ++++ frame/nomination-pools/runtime-api/Cargo.toml | 26 +++++++++++++++ frame/nomination-pools/runtime-api/README.md | 3 ++ frame/nomination-pools/runtime-api/src/lib.rs | 33 +++++++++++++++++++ frame/nomination-pools/src/lib.rs | 16 ++++++++- 8 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 frame/nomination-pools/runtime-api/Cargo.toml create mode 100644 frame/nomination-pools/runtime-api/README.md create mode 100644 frame/nomination-pools/runtime-api/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8c19c80bde1a9..50603c6235a23 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4759,6 +4759,7 @@ dependencies = [ "pallet-multisig", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-preimage", @@ -5911,6 +5912,15 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nomination-pools-runtime-api" +version = "1.0.0-dev" +dependencies = [ + "parity-scale-codec", + "sp-api", + "sp-std", +] + [[package]] name = "pallet-nomination-pools-test-staking" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 9909e6f893877..1f22343c002a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ members = [ "frame/nomination-pools", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", + "frame/nomination-pools/runtime-api", "frame/randomness-collective-flip", "frame/ranked-collective", "frame/recovery", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ca971f29e93c9..d3138ce51de9c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -79,6 +79,7 @@ pallet-mmr = { 
version = "4.0.0-dev", default-features = false, path = "../../.. pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } +pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../../frame/offences" } pallet-offences-benchmarking = { version = "4.0.0-dev", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } pallet-preimage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/preimage" } @@ -145,6 +146,7 @@ std = [ "pallet-mmr/std", "pallet-multisig/std", "pallet-nomination-pools/std", + "pallet-nomination-pools-runtime-api/std", "pallet-identity/std", "pallet-scheduler/std", "node-primitives/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b2efcb196787d..037211155cf0e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1835,6 +1835,12 @@ impl_runtime_apis! { } } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi for Runtime { + fn pending_rewards(member_account: AccountId) -> Balance { + NominationPools::pending_rewards(member_account) + } + } + impl sp_consensus_babe::BabeApi for Runtime { fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { // The choice of `c` parameter (where `1 - c` represents the diff --git a/frame/nomination-pools/runtime-api/Cargo.toml b/frame/nomination-pools/runtime-api/Cargo.toml new file mode 100644 index 0000000000000..dde925c62fe0d --- /dev/null +++ b/frame/nomination-pools/runtime-api/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "pallet-nomination-pools-runtime-api" +version = "1.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime API for nomination-pools FRAME pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "sp-api/std", + "sp-std/std", +] diff --git a/frame/nomination-pools/runtime-api/README.md b/frame/nomination-pools/runtime-api/README.md new file mode 100644 index 0000000000000..af90b31733b0b --- /dev/null +++ b/frame/nomination-pools/runtime-api/README.md @@ -0,0 +1,3 @@ +Runtime API definition for nomination-pools pallet. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/nomination-pools/runtime-api/src/lib.rs b/frame/nomination-pools/runtime-api/src/lib.rs new file mode 100644 index 0000000000000..aa3ca57ca5b8b --- /dev/null +++ b/frame/nomination-pools/runtime-api/src/lib.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime API definition for nomination-pools pallet. +//! Currently supports only one rpc endpoint. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Codec; + +sp_api::decl_runtime_apis! { + /// Runtime api for accessing information about nomination pools. + pub trait NominationPoolsApi + where AccountId: Codec, Balance: Codec + { + /// Returns the pending rewards for the member that the AccountId was given for. + fn pending_rewards(member: AccountId) -> Balance; + } +} diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 203c13f2f3496..09f1746b8e5ba 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -939,7 +939,7 @@ pub struct RewardPool { impl RewardPool { /// Getter for [`RewardPool::last_recorded_reward_counter`]. - fn last_recorded_reward_counter(&self) -> T::RewardCounter { + pub(crate) fn last_recorded_reward_counter(&self) -> T::RewardCounter { self.last_recorded_reward_counter } @@ -2147,6 +2147,20 @@ pub mod pallet { } impl Pallet { + /// Returns the pending rewards for the specified `member_account`. + /// + /// In the case of error the function returns balance of zero. + pub fn pending_rewards(member_account: T::AccountId) -> BalanceOf { + if let Some(pool_member) = PoolMembers::::get(member_account) { + if let Some(reward_pool) = RewardPools::::get(pool_member.pool_id) { + return pool_member + .pending_rewards(reward_pool.last_recorded_reward_counter()) + .unwrap_or_default() + } + } + BalanceOf::::default() + } + /// The amount of bond that MUST REMAIN IN BONDED in ALL POOLS. /// /// It is the responsibility of the depositor to put these funds into the pool initially. 
Upon From 531d7c3a5b26f5849aa6e23518c45a56d67faccb Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 26 Jul 2022 11:37:54 +0200 Subject: [PATCH 070/135] [ci] remove cargo-check-nixos job (#11873)

* [ci] remove cargo-check-nixos job

* remove shell.nix
---
 scripts/ci/gitlab/pipeline/test.yml | 24 ++++--------------------
 shell.nix | 27 ---------------------------
 2 files changed, 4 insertions(+), 47 deletions(-)
 delete mode 100644 shell.nix

diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml
index d900d425ad494..b2daf5bada24f 100644
--- a/scripts/ci/gitlab/pipeline/test.yml
+++ b/scripts/ci/gitlab/pipeline/test.yml
@@ -51,26 +51,6 @@ cargo-clippy:
 - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets
 - rusty-cachier cache upload

-cargo-check-nixos:
- stage: test
- # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
- needs:
- - job: cargo-clippy
- artifacts: false
- extends:
- - .docker-env
- - .test-refs
- before_script: []
- # Don't use CI_IMAGE here because it breaks nightly checks of paritytech/ci-linux image
- image: nixos/nix
- variables:
- SNAP: "DUMMY"
- WS_API: "DUMMY"
- script:
- - nix-channel --update
- - nix-shell shell.nix
- - nix-shell --run "cargo check --workspace --all-targets --all-features"
-
 cargo-check-benches:
 stage: test
 variables:
@@ -337,6 +317,8 @@ test-linux-stable-int:
 time cargo test -p node-cli --release --verbose --locked -- --ignored
 - rusty-cachier cache upload

+# more information about this job can be found here:
+# https://github.com/paritytech/substrate/pull/6916
 check-tracing:
 stage: test
 # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
@@ -355,6 +337,8 @@ check-tracing:
 - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing
 - rusty-cachier cache upload

+# more information about this job can be found here:
+# https://github.com/paritytech/substrate/pull/3778
 test-full-crypto-feature:
 stage: test
 # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
diff --git a/shell.nix b/shell.nix
deleted file mode 100644
index c318995605a38..0000000000000
--- a/shell.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-let
- mozillaOverlay =
- import (builtins.fetchGit {
- url = "https://github.com/mozilla/nixpkgs-mozilla.git";
- rev = "15b7a05f20aab51c4ffbefddb1b448e862dccb7d";
- });
- nixpkgs = import <nixpkgs> { overlays = [ mozillaOverlay ]; };
- rust-nightly = with nixpkgs; ((rustChannelOf { date = "2022-04-20"; channel = "nightly"; }).rust.override {
- extensions = [ "rust-src" ];
- targets = [ "wasm32-unknown-unknown" ];
- });
-in
-with nixpkgs; pkgs.mkShell {
- buildInputs = [
- clang
- openssl.dev
- pkg-config
- rust-nightly
- ] ++ lib.optionals stdenv.isDarwin [
- darwin.apple_sdk.frameworks.Security
- ];
-
- RUST_SRC_PATH = "${rust-nightly}/lib/rustlib/src/rust/src";
- LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib";
- PROTOC = "${protobuf}/bin/protoc";
- ROCKSDB_LIB_DIR = "${rocksdb}/lib";
-}
From 9428b0ea492d405f320753fcd93bc59ebb3bf33b Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 26 Jul 2022 14:37:05 +0200 Subject: [PATCH 071/135] Prepare for rust 1.62.1 (#11903)

* Update UI test output for rust 1.62.1 * switch ci to staging image to check that everything works * fix artifacts node-bench-regression-guard * Implement `scale_info::TypeInfo` manually to silence aggressive rust warning * Fix 
more clippy lints * Make clippy happy by relying on auto-deref where possible * Add tracking issue to the comments * pin ci image Co-authored-by: alvicsam
---
 .gitlab-ci.yml | 3 +-
 bin/node/bench/src/simple_trie.rs | 2 +-
 bin/node/cli/benches/transaction_pool.rs | 8 +--
 client/beefy/src/tests.rs | 14 ++---
 client/beefy/src/worker.rs | 4 +-
 client/cli/src/commands/run_cmd.rs | 2 +-
 client/consensus/common/src/block_import.rs | 4 +-
 client/db/src/lib.rs | 2 +-
 .../finality-grandpa/src/communication/mod.rs | 6 +-
 client/finality-grandpa/src/import.rs | 2 +-
 client/finality-grandpa/src/observer.rs | 2 +-
 client/rpc/src/offchain/mod.rs | 4 +-
 client/rpc/src/state/state_full.rs | 2 +-
 client/service/src/client/client.rs | 4 +-
 client/tracing/src/logging/stderr_writer.rs | 2 +-
 client/transaction-pool/tests/pool.rs | 14 ++---
 frame/contracts/src/exec.rs | 2 +-
 frame/executive/src/lib.rs | 4 +-
 frame/offences/src/mock.rs | 2 +-
 .../procedural/src/pallet/parse/call.rs | 2 +-
 .../no_std_genesis_config.stderr | 13 ++++
 .../undefined_event_part.stderr | 26 ++++++++
 .../undefined_genesis_config_part.stderr | 13 ++++
 .../undefined_origin_part.stderr | 26 ++++++++
 ...ed_keyword_two_times_integrity_test.stderr | 14 ++---
 frame/support/test/tests/decl_storage.rs | 2 +-
 .../derive_no_bound_ui/partial_eq.stderr | 4 +-
 .../call_argument_invalid_bound.stderr | 5 --
 .../call_argument_invalid_bound_2.stderr | 5 --
 .../call_argument_invalid_bound_3.stderr | 4 ++
 .../error_does_not_derive_pallet_error.stderr | 10 ++++
 .../pallet_ui/event_field_not_member.stderr | 5 --
 .../genesis_default_not_satisfied.stderr | 4 ++
 ...age_ensure_span_are_ok_on_wrong_gen.stderr | 60 +++++++++++++++++++
 ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 60 +++++++++++++++++++
 .../pallet_ui/storage_info_unsatisfied.stderr | 10 ++++
 .../storage_info_unsatisfied_nmap.stderr | 10 ++++
 .../api/proc-macro/src/impl_runtime_apis.rs | 2 +-
 primitives/runtime/src/bounded/bounded_vec.rs | 40 ++++++++++++-
 primitives/runtime/src/curve.rs | 32 +++++++++-
 primitives/runtime/src/traits.rs | 2 +-
 primitives/trie/src/lib.rs | 12 ++--
 .../proc-macro/src/decl_runtime_version.rs | 2 +-
 rustfmt.toml | 1 +
 utils/frame/remote-externalities/src/lib.rs | 2 +-
 45 files changed, 365 insertions(+), 84 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9608d88e9554d..d1de666009bf1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -47,7 +47,8 @@ variables: &default-vars
 CARGO_INCREMENTAL: 0
 DOCKER_OS: "debian:stretch"
 ARCH: "x86_64"
- CI_IMAGE: "paritytech/ci-linux:production"
+ # change to production when this image is published
+ CI_IMAGE: "paritytech/ci-linux:staging@sha256:2c90b67f1452ed2d7236c2cd13f6224053a833d521b3630650b679f00874e0a9"
 RUSTY_CACHIER_SINGLE_BRANCH: master
 RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"

diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs
index c59389570e534..aa9c96a1cbd3f 100644
--- a/bin/node/bench/src/simple_trie.rs
+++ b/bin/node/bench/src/simple_trie.rs
@@ -33,7 +33,7 @@ pub struct SimpleTrie<'a> {
 impl<'a> AsHashDB for SimpleTrie<'a> {
 fn as_hash_db(&self) -> &dyn hash_db::HashDB {
- &*self
+ self
 }

 fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) {
diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs
index 5b96355b9ca70..d88c3a65b6138 100644
--- a/bin/node/cli/benches/transaction_pool.rs
+++ b/bin/node/cli/benches/transaction_pool.rs
@@ -243,25 +243,25 @@ fn transaction_pool_benchmarks(c: &mut Criterion) {
 move |b| { 
b.iter_batched( || { - let prepare_extrinsics = create_account_extrinsics(&*node.client, &accounts); + let prepare_extrinsics = create_account_extrinsics(&node.client, &accounts); runtime.block_on(future::join_all(prepare_extrinsics.into_iter().map(|tx| { submit_tx_and_wait_for_inclusion( &node.transaction_pool, tx, - &*node.client, + &node.client, true, ) }))); - create_benchmark_extrinsics(&*node.client, &accounts, extrinsics_per_account) + create_benchmark_extrinsics(&node.client, &accounts, extrinsics_per_account) }, |extrinsics| { runtime.block_on(future::join_all(extrinsics.into_iter().map(|tx| { submit_tx_and_wait_for_inclusion( &node.transaction_pool, tx, - &*node.client, + &node.client, false, ) }))); diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 8090c425e71db..78e697a6ada81 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -467,7 +467,7 @@ fn finalize_block_and_wait_for_beefy( finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); for block in finalize_targets { let finalize = BlockId::number(*block); @@ -555,7 +555,7 @@ fn lagging_validators() { // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); @@ -563,7 +563,7 @@ fn lagging_validators() { streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); @@ -577,7 +577,7 @@ fn lagging_validators() { // validator set). 
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -586,7 +586,7 @@ fn lagging_validators() { streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) - let (best_blocks, signed_commitments) = get_beefy_streams(&mut *net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); @@ -629,7 +629,7 @@ fn correct_beefy_payload() { finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); let (best_blocks, signed_commitments) = - get_beefy_streams(&mut *net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); // now 2 good validators and 1 bad one are voting net.lock() @@ -658,7 +658,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, signed_commitments) = - get_beefy_streams(&mut *net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); net.lock() .peer(2) .client() diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 735dea0170a62..dccf7ba7504dd 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -439,7 +439,7 @@ where let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); - let signature = match self.key_store.sign(&authority_id, &*encoded_commitment) { + let signature = match self.key_store.sign(&authority_id, &encoded_commitment) { Ok(sig) => sig, Err(err) => { warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); @@ -451,7 +451,7 @@ where target: "beefy", "🥩 Produced signature using {:?}, is_valid: {:?}", authority_id, - BeefyKeystore::verify(&authority_id, &signature, &*encoded_commitment) + BeefyKeystore::verify(&authority_id, &signature, &encoded_commitment) ); let message = VoteMessage { commitment, id: authority_id, signature }; diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 99eaa4be1cd0f..3a74fdd9700f2 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -556,7 +556,7 @@ impl std::error::Error for TelemetryParsingError {} impl std::fmt::Display for TelemetryParsingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &*self { + match self { TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index f81c8eb7e8dee..10739f63ef779 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -433,10 +433,10 @@ impl JustificationSyncLink for () { impl> JustificationSyncLink for Arc { fn request_justification(&self, 
hash: &B::Hash, number: NumberFor) { - L::request_justification(&*self, hash, number); + L::request_justification(self, hash, number); } fn clear_justification_requests(&self) { - L::clear_justification_requests(&*self); + L::clear_justification_requests(self); } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index f1adbd3df1a0f..7dd49f9831f1c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2292,7 +2292,7 @@ impl sc_client_api::backend::Backend for Backend { } fn get_import_lock(&self) -> &RwLock<()> { - &*self.import_lock + &self.import_lock } fn requires_full_sync(&self) -> bool { diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 0555a03db149f..378501cffdd62 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -320,7 +320,7 @@ impl> NetworkBridge { voters: Arc>, has_voted: HasVoted, ) -> (impl Stream> + Unpin, OutgoingMessages) { - self.note_round(round, set_id, &*voters); + self.note_round(round, set_id, &voters); let keystore = keystore.and_then(|ks| { let id = ks.local_id(); @@ -637,9 +637,9 @@ fn incoming_global( .filter_map(move |(notification, msg)| { future::ready(match msg { GossipMessage::Commit(msg) => - process_commit(msg, notification, &gossip_engine, &gossip_validator, &*voters), + process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters), GossipMessage::CatchUp(msg) => - process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &*voters), + process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), _ => { debug!(target: "afg", "Skipping unknown message type"); None diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index eefb3d3f0aee4..b5a0d7be70f19 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -284,7 +284,7 @@ where impl<'a, H, N> InnerGuard<'a, H, N> { fn as_mut(&mut self) -> &mut AuthoritySet { - &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") + self.guard.as_mut().expect("only taken on deconstruction; qed") } fn set_old(&mut self, old: AuthoritySet) { diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 85d80dcfe7cde..9bcb03c0555c2 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -289,7 +289,7 @@ where network.note_round( crate::communication::Round(round), crate::communication::SetId(set_id), - &*voters, + &voters, ) } }; diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index b66b78274a64e..6896b82619166 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -57,7 +57,7 @@ impl OffchainApiServer for Offchain { StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; - self.storage.write().set(prefix, &*key, &*value); + self.storage.write().set(prefix, &key, &value); Ok(()) } @@ -69,6 +69,6 @@ impl OffchainApiServer for Offchain { StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; - Ok(self.storage.read().get(prefix, &*key).map(Into::into)) + Ok(self.storage.read().get(prefix, &key).map(Into::into)) } } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index c58638c870ab3..5a8a83cdd4851 100644 --- 
a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -199,7 +199,7 @@ where .call( &BlockId::Hash(block), &method, - &*call_data, + &call_data, self.client.execution_extensions().strategies().other, None, ) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index fb73b4c34c040..d61d7f7fa3781 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1614,11 +1614,11 @@ where RA: Send + Sync, { fn header(&self, id: BlockId) -> sp_blockchain::Result> { - (**self).backend.blockchain().header(id) + self.backend.blockchain().header(id) } fn info(&self) -> blockchain::Info { - (**self).backend.blockchain().info() + self.backend.blockchain().info() } fn status(&self, id: BlockId) -> sp_blockchain::Result { diff --git a/client/tracing/src/logging/stderr_writer.rs b/client/tracing/src/logging/stderr_writer.rs index e62c5e82c1ac7..de78a61af41a2 100644 --- a/client/tracing/src/logging/stderr_writer.rs +++ b/client/tracing/src/logging/stderr_writer.rs @@ -89,7 +89,7 @@ fn flush_logs(mut buffer: parking_lot::lock_api::MutexGuard>(); assert_eq!(expected_ready, ready); - let event = block_event_with_retracted(f1_header, f0, &*pool.api()); + let event = block_event_with_retracted(f1_header, f0, pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 3); diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index d93c646872c6f..5ca74e681e5dd 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1315,7 +1315,7 @@ where fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( - &*topics, + &topics, ::Event::from(event).into(), ) } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c40fdf94806aa..cd3e1c500db26 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -573,7 +573,7 @@ mod tests { use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; - const TEST_KEY: &[u8] = &*b":test:key:"; + const TEST_KEY: &[u8] = b":test:key:"; #[frame_support::pallet] mod custom { @@ -806,7 +806,7 @@ mod tests { type TestUncheckedExtrinsic = TestXt; // Will contain `true` when the custom runtime logic was called. 
- const CUSTOM_ON_RUNTIME_KEY: &[u8] = &*b":custom:on_runtime"; + const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; struct CustomOnRuntimeUpgrade; impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index b3dfbdd90b19d..6a69b54b3cca0 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -67,7 +67,7 @@ impl offence::OnOffenceHandler } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| f(&mut *fractions.borrow_mut())) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut fractions.borrow_mut())) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 75c85474dcfe7..d8a81d699b8c2 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -188,7 +188,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&*arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty)?; }, } diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 404c0c3627b7b..1f08ab87c1f79 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -29,6 +29,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +40 - construct_runtime! { +41 - pub enum Runtime where +42 - Block = Block, +43 - NodeBlock = Block, +44 - UncheckedExtrinsic = UncheckedExtrinsic +45 - { +46 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +47 - Pallet: test_pallet::{Pallet, Config}, +48 - } +49 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 31229f8c93cb6..2af4d3fb15000 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -32,6 +32,19 @@ help: consider importing this enum | 1 | use frame_system::Event; | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | error[E0412]: cannot find type `Event` in module `pallet` --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 @@ -52,3 +65,16 @@ help: consider importing one of these items | 1 | use frame_system::Event; | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! 
{ +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index fa1cee1ac7e2f..1bc109a45ac57 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -32,6 +32,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Config}, +57 - } +58 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 06e845618d44f..c692cd61bae8b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -32,6 +32,19 @@ help: consider importing this type alias | 1 | use frame_system::Origin; | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 @@ -52,6 +65,19 @@ help: consider importing one of these items | 1 | use frame_system::Origin; | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | error[E0282]: type annotations needed --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 86c427d8080be..880695d9b77e2 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -1,5 +1,5 @@ error: `integrity_test` can only be passed once as input. - --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { 2 | | pub struct Module for enum Call where origin: T::Origin, system=self { @@ -13,13 +13,7 @@ error: `integrity_test` can only be passed once as input. 
= note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` - --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:7:2 | -1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { -3 | | fn integrity_test() {} -4 | | -5 | | fn integrity_test() {} -6 | | } -7 | | } - | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs` +7 | } + | ^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs` diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index be5d70be17f69..7ce43cd5d44d1 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -797,7 +797,7 @@ mod test_append_and_len { TestExternalities::default().execute_with(|| { let key = JustVec::hashed_key(); // Set it to some invalid value. - frame_support::storage::unhashed::put_raw(&key, &*b"1"); + frame_support::storage::unhashed::put_raw(&key, b"1"); assert!(JustVec::get().is_empty()); assert_eq!(frame_support::storage::unhashed::get_raw(&key), Some(b"1".to_vec())); diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr index 64f844e547be0..1c230db376a49 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr @@ -1,7 +1,5 @@ error[E0369]: binary operation `==` cannot be applied to type `::C` - --> $DIR/partial_eq.rs:7:2 + --> tests/derive_no_bound_ui/partial_eq.rs:7:2 | 7 | c: T::C, | ^ - | - = note: the trait `std::cmp::PartialEq` is not implemented for `::C` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 3a636d9f659c7..1d581ea7ed572 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -19,8 +19,3 @@ error[E0369]: binary operation `==` cannot be applied to type `&, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ - | -help: consider further restricting this bound - | -17 | #[pallet::call + std::cmp::PartialEq] - | +++++++++++++++++++++ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index f182382d18f11..b1487776eac50 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -19,11 +19,6 @@ error[E0369]: binary operation `==` cannot be applied to type `&, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ - | -help: consider further restricting this bound - | -17 | #[pallet::call + std::cmp::PartialEq] - | +++++++++++++++++++++ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index c196b28a31ce6..a0418760ba7e2 100644 --- 
a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -8,3 +8,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` = note: required for the cast to the object type `dyn std::fmt::Debug` +help: consider annotating `Bar` with `#[derive(Debug)]` + | +17 | #[derive(Debug)] + | diff --git a/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index 0f2ea7e161c4e..7edb55a62d401 100644 --- a/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -4,4 +4,14 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | + = help: the following other types implement trait `PalletError`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 36 others = note: this error originates in the derive macro `frame_support::PalletError` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 3db258a819fcb..92623e0329fe3 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -9,11 +9,6 @@ error[E0369]: binary operation `==` cannot be applied to type `& { - | +++++++++++++++++++++ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> tests/pallet_ui/event_field_not_member.rs:23:7 diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index e01f66fa3d19d..80903928585b4 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -9,3 +9,7 @@ note: required by a bound in `GenesisBuild` | | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { | ^^^^^^^ required by this bound in `GenesisBuild` +help: consider annotating `pallet::GenesisConfig` with `#[derive(Default)]` + | +19 | #[derive(Default)] + | diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 87528751a0a7a..e1c1b32d65c39 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -4,6 +4,11 @@ error[E0277]: the 
trait bound `Bar: WrapperTypeDecode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -14,6 +19,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 273 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -24,6 +39,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + and 2 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -35,6 +60,16 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | + = help: the following other types implement trait `TypeInfo`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and 158 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -44,6 +79,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -54,6 +94,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the 
trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 273 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -64,6 +114,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + and 2 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 6c3f6dc662fbc..7d8c448f00193 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -4,6 +4,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -14,6 +19,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 273 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -24,6 +39,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 10 | #[pallet::without_storage_info] | ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + 
Arc + Box + Cow<'a, T> + Rc + Vec + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + and 2 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -35,6 +60,16 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` | + = help: the following other types implement trait `TypeInfo`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and 158 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -44,6 +79,11 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeDecode`: + Arc + Box + Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -54,6 +94,16 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | + = help: the following other types implement trait `EncodeLike`: + <&&T as EncodeLike> + <&T as EncodeLike> + <&T as EncodeLike> + <&[(K, V)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[(T,)] as EncodeLike>> + <&[T] as EncodeLike>> + and 273 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -64,6 +114,16 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 21 | #[pallet::storage] | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | + = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc + Box + Cow<'a, T> + Rc + Vec + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + and 2 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 68856f122c7ac..d87e6900197f5 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -4,4 +4,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 9 | #[pallet::pallet] | ^^^^^^ the trait `MaxEncodedLen` 
is not implemented for `Bar` | + = help: the following other types implement trait `MaxEncodedLen`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 70 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 226cb40f1d48b..bc8e99cd65006 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -4,5 +4,15 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 12 | #[pallet::pallet] | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | + = help: the following other types implement trait `MaxEncodedLen`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 70 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 0ac3cfbe1244e..02ef37370ffeb 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -613,7 +613,7 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { runtime_block, runtime_mod_path: &runtime_mod_path, - runtime_type: &*runtime_type, + runtime_type, trait_generic_arguments: &trait_generic_arguments, impl_trait: &impl_trait.ident, }; diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index c9c9f851d3249..10d9fc608c273 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -104,10 +104,46 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. 
-#[derive(Encode, scale_info::TypeInfo)] -#[scale_info(skip_type_params(S))] +#[derive(Encode)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); +// This can be replaced with +// #[derive(scale_info::TypeInfo)] +// #[scale_info(skip_type_params(S))] +// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 +// Tracking issues: https://github.com/paritytech/substrate/issues/11915 +impl<'a, T, S> scale_info::TypeInfo for BoundedSlice<'a, T, S> +where + &'a [T]: scale_info::TypeInfo + 'static, + PhantomData: scale_info::TypeInfo + 'static, + T: scale_info::TypeInfo + 'static, + S: 'static, +{ + type Identity = Self; + + fn type_info() -> ::scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("BoundedSlice", "sp_runtime::bounded::bounded_vec")) + .type_params(<[_]>::into_vec(Box::new([ + scale_info::TypeParameter::new( + "T", + core::option::Option::Some(::scale_info::meta_type::()), + ), + scale_info::TypeParameter::new("S", ::core::option::Option::None), + ]))) + .docs(&[ + "A bounded slice.", + "", + "Similar to a `BoundedVec`, but not owned and cannot be decoded.", + ]) + .composite( + scale_info::build::Fields::unnamed() + .field(|f| f.ty::<&'static [T]>().type_name("&'static[T]").docs(&[])) + .field(|f| f.ty::>().type_name("PhantomData").docs(&[])), + ) + } +} + // `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, // `WeakBoundedVec`, or a `Vec`. impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index c6bfa66017870..99733dbbe9a55 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -24,7 +24,7 @@ use crate::{ use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. -#[derive(PartialEq, Eq, sp_core::RuntimeDebug, scale_info::TypeInfo)] +#[derive(PartialEq, Eq, sp_core::RuntimeDebug)] pub struct PiecewiseLinear<'a> { /// Array of points. Must be in order from the lowest abscissas to the highest. pub points: &'a [(Perbill, Perbill)], @@ -32,6 +32,36 @@ pub struct PiecewiseLinear<'a> { pub maximum: Perbill, } +// This can be replaced with +// #[derive(scale_info::TypeInfo)] +// #[scale_info(skip_type_params(S))] +// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 +// Tracking issues: https://github.com/paritytech/substrate/issues/11915 +impl scale_info::TypeInfo for PiecewiseLinear<'static> { + type Identity = Self; + fn type_info() -> ::scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("PiecewiseLinear", "sp_runtime::curve")) + .type_params(crate::Vec::new()) + .docs(&["Piecewise Linear function in [0, 1] -> [0, 1]."]) + .composite( + scale_info::build::Fields::named() + .field(|f| { + f.ty::<&'static[(Perbill, Perbill)]>() + .name("points") + .type_name("&'static[(Perbill, Perbill)]") + .docs(&["Array of points. 
Must be in order from the lowest abscissas to the highest."]) + }) + .field(|f| { + f.ty::() + .name("maximum") + .type_name("Perbill") + .docs(&["The maximum value that can be returned."]) + }), + ) + } +} + fn abs_sub + Clone>(a: N, b: N) -> N where { a.clone().max(b.clone()) - a.min(b) } diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index fba3117c41617..a82ae1d62f56a 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -55,7 +55,7 @@ pub trait Lazy { impl<'a> Lazy<[u8]> for &'a [u8] { fn get(&mut self) -> &[u8] { - &**self + self } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 1dca617acf912..7a17d44aa5b69 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -280,7 +280,7 @@ where L: TrieConfiguration, DB: hash_db::HashDBRef, { - TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) + TrieDB::::new(db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the trie with given Query. @@ -295,7 +295,7 @@ where Q: Query, DB: hash_db::HashDBRef, { - TrieDB::::new(&*db, root)? + TrieDB::::new(db, root)? .get_with(key, query) .map(|x| x.map(|val| val.to_vec())) } @@ -354,7 +354,7 @@ pub fn record_all_keys( where DB: hash_db::HashDBRef, { - let trie = TrieDB::::new(&*db, root)?; + let trie = TrieDB::::new(db, root)?; let iter = trie.iter()?; for x in iter { @@ -379,7 +379,7 @@ pub fn read_child_trie_value( where DB: hash_db::HashDBRef, { - let db = KeySpacedDB::new(&*db, keyspace); + let db = KeySpacedDB::new(db, keyspace); TrieDB::::new(&db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } @@ -400,7 +400,7 @@ where // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); - let db = KeySpacedDB::new(&*db, keyspace); + let db = KeySpacedDB::new(db, keyspace); TrieDB::::new(&db, &root)? 
.get_with(key, query) .map(|x| x.map(|val| val.to_vec())) @@ -501,7 +501,7 @@ where T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { fn as_hash_db(&self) -> &dyn hash_db::HashDB { - &*self + self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs index ee81940b53993..9a25adfa5fca2 100644 --- a/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -37,7 +37,7 @@ pub fn decl_runtime_version_impl(input: proc_macro::TokenStream) -> proc_macro:: } fn decl_runtime_version_impl_inner(item: ItemConst) -> Result { - let runtime_version = ParseRuntimeVersion::parse_expr(&*item.expr)?.build(item.expr.span())?; + let runtime_version = ParseRuntimeVersion::parse_expr(&item.expr)?.build(item.expr.span())?; let link_section = generate_emit_link_section_decl(&runtime_version.encode(), "runtime_version"); diff --git a/rustfmt.toml b/rustfmt.toml index 441913f619cdc..f6fbe80064fce 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -21,3 +21,4 @@ match_block_trailing_comma = true trailing_comma = "Vertical" trailing_semicolon = false use_field_init_shorthand = true +edition = "2021" diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 1d9dc5dcd11da..202560f18cf84 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -130,7 +130,7 @@ pub enum Transport { impl Transport { fn as_client(&self) -> Option<&WsClient> { match self { - Self::RemoteClient(client) => Some(&*client), + Self::RemoteClient(client) => Some(client), _ => None, } } From fdfdc73f9e64dc47934b72eb9af3e1989e4ba699 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 26 Jul 2022 13:44:48 +0100 Subject: [PATCH 072/135] Properly defer slashes (#11823) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * initial draft of fixing slashing * fix test * Update frame/staking/src/tests.rs Co-authored-by: Piotr Mikołajczyk * last touches * add more detail about unbonding * add migration * fmt Co-authored-by: Piotr Mikołajczyk Co-authored-by: parity-processbot <> --- frame/staking/src/lib.rs | 11 +-- frame/staking/src/migrations.rs | 23 ++++++ frame/staking/src/mock.rs | 15 ++++ frame/staking/src/pallet/impls.rs | 47 ++++++------ frame/staking/src/pallet/mod.rs | 4 - frame/staking/src/tests.rs | 118 +++++++++++++++++++++++++++--- 6 files changed, 177 insertions(+), 41 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 50c033bdc7e28..3fff6312f333f 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -872,11 +872,12 @@ enum Releases { V2_0_0, V3_0_0, V4_0_0, - V5_0_0, // blockable validators. - V6_0_0, // removal of all storage associated with offchain phragmen. - V7_0_0, // keep track of number of nominators / validators in map - V8_0_0, // populate `VoterList`. - V9_0_0, // inject validators into `VoterList` as well. + V5_0_0, // blockable validators. + V6_0_0, // removal of all storage associated with offchain phragmen. + V7_0_0, // keep track of number of nominators / validators in map + V8_0_0, // populate `VoterList`. + V9_0_0, // inject validators into `VoterList` as well. + V10_0_0, // remove `EarliestUnappliedSlash`. 
} impl Default for Releases { diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 14846da8a5d54..101cac0a31348 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -20,6 +20,29 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::traits::OnRuntimeUpgrade; +pub mod v10 { + use super::*; + use frame_support::storage_alias; + + #[storage_alias] + type EarliestUnappliedSlash = StorageValue, EraIndex>; + + pub struct MigrateToV10(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV10 { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if StorageVersion::::get() == Releases::V9_0_0 { + EarliestUnappliedSlash::::kill(); + StorageVersion::::put(Releases::V10_0_0); + + T::DbWeight::get().reads_writes(1, 1) + } else { + log!(warn, "MigrateToV10 should be removed."); + T::DbWeight::get().reads(1) + } + } + } +} + pub mod v9 { use super::*; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index bd2d8cdc32ce9..70d00c2b648d8 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -549,6 +549,7 @@ impl ExtBuilder { ext } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + sp_tracing::try_init_simple(); let mut ext = self.build(); ext.execute_with(test); ext.execute_with(post_conditions); @@ -884,6 +885,20 @@ pub(crate) fn staking_events() -> Vec> { .collect() } +parameter_types! { + static StakingEventsIndex: usize = 0; +} + +pub(crate) fn staking_events_since_last_call() -> Vec> { + let all: Vec<_> = System::events() + .into_iter() + .filter_map(|r| if let Event::Staking(inner) = r.event { Some(inner) } else { None }) + .collect(); + let seen = StakingEventsIndex::get(); + StakingEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) { (Balances::free_balance(who), Balances::reserved_balance(who)) } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 7656eec80a5ff..68aa97db8a324 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -32,7 +32,7 @@ use frame_support::{ use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; use sp_runtime::{ - traits::{Bounded, Convert, SaturatedConversion, Saturating, StaticLookup, Zero}, + traits::{Bounded, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero}, Perbill, }; use sp_staking::{ @@ -599,20 +599,17 @@ impl Pallet { /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. fn apply_unapplied_slashes(active_era: EraIndex) { - let slash_defer_duration = T::SlashDeferDuration::get(); - ::EarliestUnappliedSlash::mutate(|earliest| { - if let Some(ref mut earliest) = earliest { - let keep_from = active_era.saturating_sub(slash_defer_duration); - for era in (*earliest)..keep_from { - let era_slashes = ::UnappliedSlashes::take(&era); - for slash in era_slashes { - slashing::apply_slash::(slash, era); - } - } - - *earliest = (*earliest).max(keep_from) - } - }) + let era_slashes = ::UnappliedSlashes::take(&active_era); + log!( + debug, + "found {} slashes scheduled to be executed in era {:?}", + era_slashes.len(), + active_era, + ); + for slash in era_slashes { + let slash_era = active_era.saturating_sub(T::SlashDeferDuration::get()); + slashing::apply_slash::(slash, slash_era); + } } /// Add reward points to validators using their stash account ID. 
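The hunk above inverts the old bookkeeping: instead of scanning forward from an `EarliestUnappliedSlash` cursor, `UnappliedSlashes` is now keyed by the era in which a slash must be applied, so `apply_unapplied_slashes` can simply `take` the active era's entry. A minimal sketch of the key computation used by the deferral hunk below (the free function and its `u32` eras are illustrative, not part of the patch):

// A slash reported for `slash_era` is deferred for `defer_duration` full eras
// and applied at the start of the era after that, mirroring
// `slash_era.saturating_add(slash_defer_duration).saturating_add(One::one())`.
fn deferred_application_era(slash_era: u32, defer_duration: u32) -> u32 {
    slash_era.saturating_add(defer_duration).saturating_add(1)
}

With `SlashDeferDuration = 2`, an offence for era 1 is queued under era 4, which is exactly what the updated tests assert.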
@@ -1209,11 +1206,6 @@ where } }; - ::EarliestUnappliedSlash::mutate(|earliest| { - if earliest.is_none() { - *earliest = Some(active_era) - } - }); add_db_reads_writes(1, 1); let slash_defer_duration = T::SlashDeferDuration::get(); @@ -1263,9 +1255,18 @@ where } } else { // Defer to end of some `slash_defer_duration` from now. - ::UnappliedSlashes::mutate(active_era, move |for_later| { - for_later.push(unapplied) - }); + log!( + debug, + "deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}", + slash_fraction, + slash_era, + active_era, + slash_era + slash_defer_duration + 1, + ); + ::UnappliedSlashes::mutate( + slash_era.saturating_add(slash_defer_duration).saturating_add(One::one()), + move |for_later| for_later.push(unapplied), + ); add_db_reads_writes(1, 1); } } else { diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index e53464195de23..c999020c28167 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -477,10 +477,6 @@ pub mod pallet { ValueQuery, >; - /// The earliest era for which we have a pending, unapplied slash. - #[pallet::storage] - pub(crate) type EarliestUnappliedSlash = StorageValue<_, EraIndex>; - /// The last planned session scheduled by the session pallet. /// /// This is basically in sync with the call to [`pallet_session::SessionManager::new_session`]. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index b76126f0c5d04..d14d8c4a75f2e 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -2777,12 +2777,103 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); + System::reset_events(); + // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. mock::start_active_era(4); assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid(3, 11075, 33225), + Event::Slashed(11, 100), + Event::Slashed(101, 12) + ] + ); + }) +} + +#[test] +fn retroactive_deferred_slashes_two_eras_before() { + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + assert_eq!(BondingDuration::get(), 3); + + mock::start_active_era(1); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + + mock::start_active_era(3); + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], + &[Perbill::from_percent(10)], + 1, // should be deferred for two full eras, and applied at the beginning of era 4. + DisableStrategy::Never, + ); + System::reset_events(); + + mock::start_active_era(4); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid(3, 7100, 21300), + Event::Slashed(11, 100), + Event::Slashed(101, 12) + ] + ); + }) +} + +#[test] +fn retroactive_deferred_slashes_one_before() { + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + assert_eq!(BondingDuration::get(), 3); + + mock::start_active_era(1); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + + // unbond at slash era. 
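+ // (For orientation, assuming `SlashDeferDuration = 2` as configured above:
+ // exposure is taken in era 1, the unbond below happens in era 2, the offence
+ // for era 2 is reported in era 3, so the slash is queued under era 2 + 2 + 1 = 5.)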
+ mock::start_active_era(2); + assert_ok!(Staking::chill(Origin::signed(10))); + assert_ok!(Staking::unbond(Origin::signed(10), 100)); + + mock::start_active_era(3); + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], + &[Perbill::from_percent(10)], + 2, // should be deferred for two full eras, and applied at the beginning of era 5. + DisableStrategy::Never, + ); + System::reset_events(); + + mock::start_active_era(4); + assert_eq!( + staking_events_since_last_call(), + vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)] + ); + + assert_eq!(Staking::ledger(10).unwrap().total, 1000); + // slash happens after the next line. + mock::start_active_era(5); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid(4, 11075, 33225), + Event::Slashed(11, 100), + Event::Slashed(101, 12) + ] + ); + + // their ledger has already been slashed. + assert_eq!(Staking::ledger(10).unwrap().total, 900); + assert_ok!(Staking::unbond(Origin::signed(10), 1000)); + assert_eq!(Staking::ledger(10).unwrap().total, 900); }) } @@ -2871,6 +2962,7 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + // deferred to start of era 4. on_offence_now( &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(10)], @@ -2881,6 +2973,7 @@ fn remove_deferred() { mock::start_active_era(2); + // reported later, but deferred to start of era 4 as well. on_offence_in_era( &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], @@ -2894,7 +2987,8 @@ fn remove_deferred() { Error::::EmptyTargets ); - assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0])); + // cancel one of them. + assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0])); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); @@ -2906,13 +3000,19 @@ fn remove_deferred() { // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. + System::reset_events(); mock::start_active_era(4); - // the first slash for 10% was cancelled, so no effect. 
- assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - mock::start_active_era(5); + // the first slash for 10% was cancelled, but the 15% one + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid(3, 11075, 33225), + Event::Slashed(11, 50), + Event::Slashed(101, 7) + ] + ); let slash_10 = Perbill::from_percent(10); let slash_15 = Perbill::from_percent(15); @@ -2965,7 +3065,7 @@ fn remove_multi_deferred() { &[Perbill::from_percent(25)], ); - assert_eq!(::UnappliedSlashes::get(&1).len(), 5); + assert_eq!(::UnappliedSlashes::get(&4).len(), 5); // fails if list is not sorted assert_noop!( @@ -2983,9 +3083,9 @@ fn remove_multi_deferred() { Error::::InvalidSlashIndex ); - assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0, 2, 4])); + assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0, 2, 4])); - let slashes = ::UnappliedSlashes::get(&1); + let slashes = ::UnappliedSlashes::get(&4); assert_eq!(slashes.len(), 2); assert_eq!(slashes[0].validator, 21); assert_eq!(slashes[1].validator, 42); From 7db3c4fc5221d1f3fde36f1a5ef3042725a0f616 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 26 Jul 2022 18:03:50 +0200 Subject: [PATCH 073/135] [ci] Return production image (#11920) --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d1de666009bf1..9608d88e9554d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -47,8 +47,7 @@ variables: &default-vars CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:stretch" ARCH: "x86_64" - # change to production when this image is published - CI_IMAGE: "paritytech/ci-linux:staging@sha256:2c90b67f1452ed2d7236c2cd13f6224053a833d521b3630650b679f00874e0a9" + CI_IMAGE: "paritytech/ci-linux:production" RUSTY_CACHIER_SINGLE_BRANCH: master RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" From 9acfbc0e88f5a6df873d222a3e01c2b59868b2e3 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 26 Jul 2022 18:49:11 +0100 Subject: [PATCH 074/135] Make New Storage Layer Truly Default (#11918) * with storage layer truly default * fmt Co-authored-by: parity-processbot <> --- frame/nomination-pools/src/tests.rs | 4 ++-- .../src/construct_runtime/expand/call.rs | 13 ------------- .../procedural/src/pallet/expand/call.rs | 4 ++-- frame/support/src/dispatch.rs | 7 +++---- frame/support/src/traits.rs | 5 ++--- frame/support/src/traits/dispatch.rs | 17 ----------------- 6 files changed, 9 insertions(+), 41 deletions(-) diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index b59f18cca72a2..5aa8e97266e0d 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -2735,7 +2735,7 @@ mod unbond { // when: unbonding more than our active: error assert_noop!( - frame_support::storage::in_storage_layer(|| Pools::unbond( + frame_support::storage::with_storage_layer(|| Pools::unbond( Origin::signed(10), 10, 5 @@ -2787,7 +2787,7 @@ mod unbond { // when CurrentEra::set(2); assert_noop!( - frame_support::storage::in_storage_layer(|| Pools::unbond( + frame_support::storage::with_storage_layer(|| Pools::unbond( Origin::signed(20), 20, 4 diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index c8c5d5ff0ee43..801b69035121d 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ 
b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -163,19 +163,6 @@ pub fn expand_outer_dispatch( } } } - impl #scrate::traits::DispatchableWithStorageLayer for Call { - type Origin = Origin; - fn dispatch_with_storage_layer(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { - #scrate::storage::with_storage_layer(|| { - #scrate::dispatch::Dispatchable::dispatch(self, origin) - }) - } - fn dispatch_bypass_filter_with_storage_layer(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { - #scrate::storage::with_storage_layer(|| { - #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) - }) - } - } #( impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index a5038ec399e2b..fe7589a8275d2 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -267,9 +267,9 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); - // We execute all dispatchable in at least one storage layer, allowing them + // We execute all dispatchable in a new storage layer, allowing them // to return an error at any point, and undoing any storage changes. - #frame_support::storage::in_storage_layer(|| { + #frame_support::storage::with_storage_layer(|| { <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) .map(Into::into).map_err(Into::into) }) diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index c175998956ff2..ae4230efc63f8 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -29,8 +29,7 @@ pub use crate::{ result, }, traits::{ - CallMetadata, DispatchableWithStorageLayer, GetCallMetadata, GetCallName, - GetStorageVersion, UnfilteredDispatchable, + CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, }, weights::{ ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, @@ -1473,9 +1472,9 @@ macro_rules! decl_module { $ignore:ident $mod_type:ident<$trait_instance:ident $(, $instance:ident)?> $fn_name:ident $origin:ident $system:ident [ $( $param_name:ident),* ] ) => { - // We execute all dispatchable in at least one storage layer, allowing them + // We execute all dispatchable in a new storage layer, allowing them // to return an error at any point, and undoing any storage changes. 
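+ // (Assumed semantics behind the swap below: `in_storage_layer` only opened a
+ // new layer when none was already active, whereas `with_storage_layer`
+ // unconditionally opens a fresh transactional layer for every dispatch.)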
- $crate::storage::in_storage_layer(|| { + $crate::storage::with_storage_layer(|| { <$mod_type<$trait_instance $(, $instance)?>>::$fn_name( $origin $(, $param_name )* ).map(Into::into).map_err(Into::into) }) }; diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index db8c8da4f869b..72d6d6682f14a 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -96,9 +96,8 @@ mod dispatch; #[allow(deprecated)] pub use dispatch::EnsureOneOf; pub use dispatch::{ - AsEnsureOriginWithArg, DispatchableWithStorageLayer, EitherOf, EitherOfDiverse, EnsureOrigin, - EnsureOriginWithArg, MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, - UnfilteredDispatchable, + AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, + MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, UnfilteredDispatchable, }; mod voting; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index 7e97be12e449e..afc819aa454e5 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -236,23 +236,6 @@ pub trait UnfilteredDispatchable { fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; } -/// Type that can be dispatched with an additional storage layer which is used to execute the call. -pub trait DispatchableWithStorageLayer { - /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). - type Origin; - - /// Same as `dispatch` from the [`frame_support::dispatch::Dispatchable`] trait, but - /// specifically spawns a new storage layer to execute the call inside of. - fn dispatch_with_storage_layer(self, origin: Self::Origin) -> DispatchResultWithPostInfo; - - /// Same as `dispatch_bypass_filter` from the [`UnfilteredDispatchable`] trait, but specifically - /// spawns a new storage layer to execute the call inside of. - fn dispatch_bypass_filter_with_storage_layer( - self, - origin: Self::Origin, - ) -> DispatchResultWithPostInfo; -} - /// Methods available on `frame_system::Config::Origin`. 
pub trait OriginTrait: Sized { /// Runtime call type, as in `frame_system::Config::Call` From dddfed3d9260cf03244f15ba3db4edf9af7467e9 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 28 Jul 2022 00:35:33 +0200 Subject: [PATCH 075/135] Remove `retain_mut` crate (#11926) * Remove retain_mut crate * Remove reain_mut crate from babe-consensus --- Cargo.lock | 8 ----- client/consensus/babe/Cargo.toml | 1 - client/consensus/babe/src/lib.rs | 22 +++++++------- client/transaction-pool/Cargo.toml | 1 - .../src/graph/validated_pool.rs | 30 +++++++++---------- 5 files changed, 24 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50603c6235a23..3b0246168632c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7446,12 +7446,6 @@ dependencies = [ "quick-error 1.2.3", ] -[[package]] -name = "retain_mut" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "448296241d034b96c11173591deaa1302f2c17b56092106c1f92c1bc0183a8c9" - [[package]] name = "rfc6979" version = "0.1.0" @@ -7965,7 +7959,6 @@ dependencies = [ "parking_lot 0.12.0", "rand 0.7.3", "rand_chacha 0.2.2", - "retain_mut", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -8877,7 +8870,6 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parking_lot 0.12.0", - "retain_mut", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 765fc367f424f..59d7854619fc0 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -26,7 +26,6 @@ num-rational = "0.2.2" num-traits = "0.2.8" parking_lot = "0.12.0" rand = "0.7.2" -retain_mut = "0.1.4" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 8478122375319..f61ba23d920f3 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -87,7 +87,6 @@ use futures::{ use log::{debug, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use retain_mut::RetainMut; use schnorrkel::SignatureError; use sc_client_api::{ @@ -835,17 +834,16 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "babe", "Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } + let sinks = &mut self.slot_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false + }, }); } diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index cabde4b6307ae..3fdcde48d9e22 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -20,7 +20,6 @@ linked-hash-map = "0.5.4" log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.12.0" -retain_mut = "0.1.4" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = 
"../../utils/prometheus" } diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index e59c5afbdc512..dcb8195073733 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -24,7 +24,6 @@ use std::{ use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use retain_mut::RetainMut; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; use serde::Serialize; use sp_runtime::{ @@ -204,21 +203,20 @@ impl ValidatedPool { let imported = self.pool.write().import(tx)?; if let base::Imported::Ready { ref hash, .. } = imported { - RetainMut::retain_mut(&mut *self.import_notification_sinks.lock(), |sink| { - match sink.try_send(*hash) { - Ok(()) => true, - Err(e) => - if e.is_full() { - log::warn!( - target: "txpool", - "[{:?}] Trying to notify an import but the channel is full", - hash, - ); - true - } else { - false - }, - } + let sinks = &mut self.import_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send(*hash) { + Ok(()) => true, + Err(e) => + if e.is_full() { + log::warn!( + target: "txpool", + "[{:?}] Trying to notify an import but the channel is full", + hash, + ); + true + } else { + false + }, }); } From 782e08ed36e3c3e5ffa3a48484267b02ec8b7b6d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 28 Jul 2022 11:44:02 +0100 Subject: [PATCH 076/135] Fix slashing migration to v10 (#11924) * Fix slashing migration to v10 * add some logs * Fix default version * fmt * Move doc to struct Co-authored-by: Wilfried Kopp --- frame/staking/src/lib.rs | 2 +- frame/staking/src/migrations.rs | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 3fff6312f333f..ab0ab685e6911 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -882,7 +882,7 @@ enum Releases { impl Default for Releases { fn default() -> Self { - Releases::V8_0_0 + Releases::V10_0_0 } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 101cac0a31348..7e3bf6ccb93e1 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -27,13 +27,29 @@ pub mod v10 { #[storage_alias] type EarliestUnappliedSlash = StorageValue, EraIndex>; + /// Apply any pending slashes that where queued. + /// + /// That means we might slash someone a bit too early, but we will definitely + /// won't forget to slash them. The cap of 512 is somewhat randomly taken to + /// prevent us from iterating over an arbitrary large number of keys `on_runtime_upgrade`. pub struct MigrateToV10(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV10 { fn on_runtime_upgrade() -> frame_support::weights::Weight { if StorageVersion::::get() == Releases::V9_0_0 { + let pending_slashes = as Store>::UnappliedSlashes::iter().take(512); + for (era, slashes) in pending_slashes { + for slash in slashes { + // in the old slashing scheme, the slash era was the key at which we read + // from `UnappliedSlashes`. 
+ log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era); + slashing::apply_slash::(slash, era); + } + } + EarliestUnappliedSlash::::kill(); StorageVersion::::put(Releases::V10_0_0); + log!(info, "MigrateToV10 executed successfully"); T::DbWeight::get().reads_writes(1, 1) } else { log!(warn, "MigrateToV10 should be removed."); From b8591d63c998a8a80376018c426a5b1fd75714db Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Thu, 28 Jul 2022 23:31:47 +0800 Subject: [PATCH 077/135] Enrich the sync log on handling the block request (#11747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add more info to the log to help debug the issue like https://github.com/paritytech/substrate/issues/11732. Co-authored-by: Bastian Köcher --- .../network/sync/src/block_request_handler.rs | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 2a847a8bf36ec..30cbb32289d66 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -226,16 +226,13 @@ where debug!( target: LOG_TARGET, - "Handling block request from {}: Starting at `{:?}` with maximum blocks \ - of `{}`, direction `{:?}` and attributes `{:?}`.", - peer, - from_block_id, - max_blocks, - direction, - attributes, + "Handling block request from {peer}: Starting at `{from_block_id:?}` with \ + maximum blocks of `{max_blocks}`, reputation_change: `{reputation_change:?}`, \ + small_request `{small_request:?}`, direction `{direction:?}` and \ + attributes `{attributes:?}`.", ); - let result = if reputation_change.is_none() || small_request { + let maybe_block_response = if reputation_change.is_none() || small_request { let block_response = self.get_block_response( attributes, from_block_id, @@ -259,9 +256,22 @@ where } } + Some(block_response) + } else { + None + }; + + debug!( + target: LOG_TARGET, + "Sending result of block request from {peer} starting at `{from_block_id:?}`: \ + blocks: {:?}, data: {:?}", + maybe_block_response.as_ref().map(|res| res.blocks.len()), + maybe_block_response.as_ref().map(|res| res.encoded_len()), + ); + + let result = if let Some(block_response) = maybe_block_response { let mut data = Vec::with_capacity(block_response.encoded_len()); block_response.encode(&mut data)?; - Ok(data) } else { Err(()) From 3ca8a42954e284bfb8771ed93d37cced3a7809b9 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 29 Jul 2022 16:46:15 +0900 Subject: [PATCH 078/135] Prevent double allocation of the payload when calling `sp_io::storage::get` (#11523) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Expose allocation stats in `FreeingBumpHeapAllocator` * Return allocation stats when calling into the runtime * Bump `parity-scale-codec` to 3.1.3 (fork) * Prevent double allocation of the payload when calling `sp_io::storage::get` * Fix tests * Remove unnecessary `mut` * Enable the `bytes` feature for `parity-scale-codec` in `sp-runtime-interface` * Update client/allocator/src/freeing_bump.rs Co-authored-by: Bastian Köcher * Bump `parity-scale-codec` to 3.1.3 * Fix some of the UI tests Co-authored-by: Bastian Köcher --- Cargo.lock | 4 + client/allocator/src/freeing_bump.rs | 97 +++++++++++-------- client/allocator/src/lib.rs | 2 +- client/executor/common/src/wasm_runtime.rs | 17 +++- client/executor/src/native_executor.rs | 49 +++++++++- 
client/executor/wasmi/src/lib.rs | 26 ++++- client/executor/wasmtime/src/host.rs | 6 +- client/executor/wasmtime/src/runtime.rs | 34 +++++-- frame/support/src/storage/unhashed.rs | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 12 +-- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 12 +-- primitives/io/Cargo.toml | 3 +- primitives/io/src/lib.rs | 10 +- primitives/runtime-interface/Cargo.toml | 3 +- primitives/runtime-interface/src/pass_by.rs | 6 +- .../runtime-interface/test-wasm/Cargo.toml | 1 + .../runtime-interface/test-wasm/src/lib.rs | 20 ++++ primitives/runtime-interface/test/src/lib.rs | 72 +++++++++++--- 18 files changed, 284 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b0246168632c..926d6d4cdf19a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6516,6 +6516,7 @@ dependencies = [ "arrayvec 0.7.2", "bitvec", "byte-slice-cast", + "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", "serde", @@ -9731,6 +9732,7 @@ dependencies = [ name = "sp-io" version = "6.0.0" dependencies = [ + "bytes", "futures", "hash-db", "libsecp256k1", @@ -9889,6 +9891,7 @@ dependencies = [ name = "sp-runtime-interface" version = "6.0.0" dependencies = [ + "bytes", "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", @@ -9938,6 +9941,7 @@ dependencies = [ name = "sp-runtime-interface-test-wasm" version = "2.0.0" dependencies = [ + "bytes", "sp-core", "sp-io", "sp-runtime-interface", diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index 79d6fca6f91b6..13dc6dca0dcd6 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -314,27 +314,50 @@ impl IndexMut for FreeLists { } } +/// Memory allocation stats gathered during the lifetime of the allocator. +#[derive(Clone, Debug, Default)] +#[non_exhaustive] +pub struct AllocationStats { + /// The current number of bytes allocated. + /// + /// This represents how many bytes are allocated *right now*. + pub bytes_allocated: u32, + + /// The peak number of bytes ever allocated. + /// + /// This is the maximum the `bytes_allocated` ever reached. + pub bytes_allocated_peak: u32, + + /// The sum of every allocation ever made. + /// + /// This increases every time a new allocation is made. + pub bytes_allocated_sum: u32, + + /// The amount of address space (in bytes) used by the allocator. + /// + /// This is calculated as the difference between the allocator's bumper + /// and the heap base. + /// + /// Currently the bumper's only ever incremented, so this is simultaneously + /// the current value as well as the peak value. + pub address_space_used: u32, +} + /// An implementation of freeing bump allocator. /// /// Refer to the module-level documentation for further details. 
pub struct FreeingBumpHeapAllocator { + original_heap_base: u32, bumper: u32, free_lists: FreeLists, - total_size: u32, poisoned: bool, - max_total_size: u32, - max_bumper: u32, last_observed_memory_size: u32, + stats: AllocationStats, } impl Drop for FreeingBumpHeapAllocator { fn drop(&mut self) { - log::debug!( - target: LOG_TARGET, - "allocator being destroyed, max_total_size {}, max_bumper {}", - self.max_total_size, - self.max_bumper, - ) + log::debug!(target: LOG_TARGET, "allocator dropped: {:?}", self.stats) } } @@ -348,13 +371,12 @@ impl FreeingBumpHeapAllocator { let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT; FreeingBumpHeapAllocator { + original_heap_base: aligned_heap_base, bumper: aligned_heap_base, free_lists: FreeLists::new(), - total_size: 0, poisoned: false, - max_total_size: 0, - max_bumper: aligned_heap_base, last_observed_memory_size: 0, + stats: AllocationStats::default(), } } @@ -412,22 +434,13 @@ impl FreeingBumpHeapAllocator { // Write the order in the occupied header. Header::Occupied(order).write_into(mem, header_ptr)?; - self.total_size += order.size() + HEADER_SIZE; - - log::trace!( - target: LOG_TARGET, - "after allocation, total_size = {}, bumper = {}.", - self.total_size, - self.bumper, - ); + self.stats.bytes_allocated += order.size() + HEADER_SIZE; + self.stats.bytes_allocated_sum += order.size() + HEADER_SIZE; + self.stats.bytes_allocated_peak = + std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated); + self.stats.address_space_used = self.bumper - self.original_heap_base; - // update trackers if needed. - if self.total_size > self.max_total_size { - self.max_total_size = self.total_size; - } - if self.bumper > self.max_bumper { - self.max_bumper = self.bumper; - } + log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats); bomb.disarm(); Ok(Pointer::new(header_ptr + HEADER_SIZE)) @@ -469,21 +482,23 @@ impl FreeingBumpHeapAllocator { let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr)); Header::Free(prev_head).write_into(mem, header_ptr)?; - // Do the total_size book keeping. - self.total_size = self - .total_size + self.stats.bytes_allocated = self + .stats + .bytes_allocated .checked_sub(order.size() + HEADER_SIZE) - .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; - log::trace!( - "after deallocation, total_size = {}, bumper = {}.", - self.total_size, - self.bumper, - ); + .ok_or_else(|| error("underflow of the currently allocated bytes count"))?; + + log::trace!("after deallocation: {:?}", self.stats); bomb.disarm(); Ok(()) } + /// Returns the allocation stats for this allocator. + pub fn stats(&self) -> AllocationStats { + self.stats.clone() + } + /// Increases the `bumper` by `size`. /// /// Returns the `bumper` from before the increase. 
Returns an `Error::AllocatorOutOfSpace` if @@ -791,13 +806,13 @@ mod tests { let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); assert_eq!(ptr1, to_pointer(HEADER_SIZE)); heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1"); - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); assert_eq!(heap.bumper, 40); let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); assert_eq!(ptr2, to_pointer(48)); heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2"); - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); assert_eq!(heap.bumper, 64); // when @@ -825,7 +840,7 @@ mod tests { heap.allocate(&mut mem[..], 9).unwrap(); // then - assert_eq!(heap.total_size, HEADER_SIZE + 16); + assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16); } #[test] @@ -840,7 +855,7 @@ mod tests { heap.deallocate(&mut mem[..], ptr).unwrap(); // then - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); } #[test] @@ -856,7 +871,7 @@ mod tests { } // then - assert_eq!(heap.total_size, 0); + assert_eq!(heap.stats.bytes_allocated, 0); } #[test] diff --git a/client/allocator/src/lib.rs b/client/allocator/src/lib.rs index 2c3e3b2ae841d..2fe63a1ec392c 100644 --- a/client/allocator/src/lib.rs +++ b/client/allocator/src/lib.rs @@ -26,4 +26,4 @@ mod error; mod freeing_bump; pub use error::Error; -pub use freeing_bump::FreeingBumpHeapAllocator; +pub use freeing_bump::{AllocationStats, FreeingBumpHeapAllocator}; diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index d43ad758b144c..d0cc8926144be 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -21,6 +21,8 @@ use crate::error::Error; use sp_wasm_interface::Value; +pub use sc_allocator::AllocationStats; + /// A method to be used to find the entrypoint when calling into the runtime /// /// Contains variants on how to resolve wasm function that will be invoked. @@ -78,7 +80,20 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error>; + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + self.call_with_allocation_stats(method, data).0 + } + + /// Call a method on this WASM instance. + /// + /// Before execution, instance is reset. + /// + /// Returns the encoded result on success. + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result, Error>, Option); /// Call an exported method on this WASM instance. 
/// diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index ea060a89c13df..d805949d26fd9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -37,7 +37,7 @@ use std::{ use codec::{Decode, Encode}; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, + wasm_runtime::{AllocationStats, InvokeMethod, WasmInstance, WasmModule}, }; use sp_core::{ traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, @@ -219,6 +219,47 @@ where allow_missing_host_functions: bool, export_name: &str, call_data: &[u8], + ) -> std::result::Result, Error> { + self.uncached_call_impl( + runtime_blob, + ext, + allow_missing_host_functions, + export_name, + call_data, + &mut None, + ) + } + + /// Same as `uncached_call`, except it also returns allocation statistics. + #[doc(hidden)] // We use this function in tests. + pub fn uncached_call_with_allocation_stats( + &self, + runtime_blob: RuntimeBlob, + ext: &mut dyn Externalities, + allow_missing_host_functions: bool, + export_name: &str, + call_data: &[u8], + ) -> (std::result::Result, Error>, Option) { + let mut allocation_stats = None; + let result = self.uncached_call_impl( + runtime_blob, + ext, + allow_missing_host_functions, + export_name, + call_data, + &mut allocation_stats, + ); + (result, allocation_stats) + } + + fn uncached_call_impl( + &self, + runtime_blob: RuntimeBlob, + ext: &mut dyn Externalities, + allow_missing_host_functions: bool, + export_name: &str, + call_data: &[u8], + allocation_stats_out: &mut Option, ) -> std::result::Result, Error> { let module = crate::wasm_runtime::create_wasm_runtime_with_code::( self.method, @@ -235,10 +276,14 @@ where let mut instance = AssertUnwindSafe(instance); let mut ext = AssertUnwindSafe(ext); let module = AssertUnwindSafe(module); + let mut allocation_stats_out = AssertUnwindSafe(allocation_stats_out); with_externalities_safe(&mut **ext, move || { preregister_builtin_ext(module.clone()); - instance.call_export(export_name, call_data) + let (result, allocation_stats) = + instance.call_with_allocation_stats(export_name.into(), call_data); + **allocation_stats_out = allocation_stats; + result }) .and_then(|r| r) } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 199fe431580ed..e17707e158321 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -29,6 +29,7 @@ use wasmi::{ }; use codec::{Decode, Encode}; +use sc_allocator::AllocationStats; use sc_executor_common::{ error::{Error, MessageWithBacktrace, WasmError}, runtime_blob::{DataSegmentsSnapshot, RuntimeBlob}, @@ -489,6 +490,7 @@ fn call_in_wasm_module( host_functions: Arc>, allow_missing_func_imports: bool, missing_functions: Arc>, + allocation_stats: &mut Option, ) -> Result, Error> { // Initialize FunctionExecutor. 
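// (The executor constructed below owns the `FreeingBumpHeapAllocator`; the
// `heap.borrow().stats()` snapshot taken after the call is what is returned
// through the new `allocation_stats` out-parameter.)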
let table: Option = module_instance @@ -561,6 +563,8 @@ fn call_in_wasm_module( }, }; + *allocation_stats = Some(function_executor.heap.borrow().stats()); + match result { Ok(Some(I64(r))) => { let (ptr, length) = unpack_ptr_and_len(r as u64); @@ -761,8 +765,13 @@ pub struct WasmiInstance { // `self.instance` unsafe impl Send for WasmiInstance {} -impl WasmInstance for WasmiInstance { - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { +impl WasmiInstance { + fn call_impl( + &mut self, + method: InvokeMethod, + data: &[u8], + allocation_stats: &mut Option, + ) -> Result, Error> { // We reuse a single wasm instance for multiple calls and a previous call (if any) // altered the state. Therefore, we need to restore the instance to original state. @@ -790,8 +799,21 @@ impl WasmInstance for WasmiInstance { self.host_functions.clone(), self.allow_missing_func_imports, self.missing_functions.clone(), + allocation_stats, ) } +} + +impl WasmInstance for WasmiInstance { + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result, Error>, Option) { + let mut allocation_stats = None; + let result = self.call_impl(method, data, &mut allocation_stats); + (result, allocation_stats) + } fn get_global_const(&mut self, name: &str) -> Result, Error> { match self.instance.export_by_name(name) { diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index b494795ae9dc1..a54254810b68b 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -23,7 +23,7 @@ use log::trace; use wasmtime::{Caller, Func, Val}; use codec::{Decode, Encode}; -use sc_allocator::FreeingBumpHeapAllocator; +use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::Result, sandbox::{self, SupervisorFuncIndex}, @@ -66,6 +66,10 @@ impl HostState { pub fn take_panic_message(&mut self) -> Option { self.panic_message.take() } + + pub(crate) fn allocation_stats(&self) -> AllocationStats { + self.allocator.stats() + } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index dcf07c454e46d..2feb9d0eea502 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -24,7 +24,7 @@ use crate::{ util::{self, replace_strategy_if_broken}, }; -use sc_allocator::FreeingBumpHeapAllocator; +use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::{Result, WasmError}, runtime_blob::{ @@ -184,8 +184,13 @@ pub struct WasmtimeInstance { strategy: Strategy, } -impl WasmInstance for WasmtimeInstance { - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result> { +impl WasmtimeInstance { + fn call_impl( + &mut self, + method: InvokeMethod, + data: &[u8], + allocation_stats: &mut Option, + ) -> Result> { match &mut self.strategy { Strategy::LegacyInstanceReuse { ref mut instance_wrapper, @@ -205,7 +210,8 @@ impl WasmInstance for WasmtimeInstance { globals_snapshot.apply(&mut InstanceGlobals { instance: instance_wrapper }); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = perform_call(data, instance_wrapper, entrypoint, allocator); + let result = + perform_call(data, instance_wrapper, entrypoint, allocator, allocation_stats); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. 
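Threading `&mut Option<AllocationStats>` down into `perform_call` is what lets both execution strategies surface the stats without touching the pre-existing `call` contract. A rough usage sketch against the new trait surface (the instantiation and the `Core_version` export are assumed context, not shown in this patch):

// `instance` is a `Box<dyn WasmInstance>` obtained from the executor.
let (result, stats) = instance.call_with_allocation_stats("Core_version".into(), &[]);
if let Some(stats) = stats {
    // `bytes_allocated_peak` is the high-water mark of the bump allocator for
    // this call; `address_space_used` is the bumper minus the heap base.
    log::debug!("peak bytes allocated: {}", stats.bytes_allocated_peak);
}
let _encoded_version = result.expect("runtime call failed");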
@@ -219,10 +225,22 @@ impl WasmInstance for WasmtimeInstance { let entrypoint = instance_wrapper.resolve_entrypoint(method)?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call(data, &mut instance_wrapper, entrypoint, allocator) + perform_call(data, &mut instance_wrapper, entrypoint, allocator, allocation_stats) }, } } +} + +impl WasmInstance for WasmtimeInstance { + fn call_with_allocation_stats( + &mut self, + method: InvokeMethod, + data: &[u8], + ) -> (Result>, Option) { + let mut allocation_stats = None; + let result = self.call_impl(method, data, &mut allocation_stats); + (result, allocation_stats) + } fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { @@ -741,6 +759,7 @@ fn perform_call( instance_wrapper: &mut InstanceWrapper, entrypoint: EntryPoint, mut allocator: FreeingBumpHeapAllocator, + allocation_stats: &mut Option, ) -> Result> { let (data_ptr, data_len) = inject_input_data(instance_wrapper, &mut allocator, data)?; @@ -754,7 +773,10 @@ fn perform_call( .map(unpack_ptr_and_len); // Reset the host state - instance_wrapper.store_mut().data_mut().host_state = None; + let host_state = instance_wrapper.store_mut().data_mut().host_state.take().expect( + "the host state is always set before calling into WASM so it can't be None here; qed", + ); + *allocation_stats = Some(host_state.allocation_stats()); let (output_ptr, output_len) = ret?; let output = extract_output_data(instance_wrapper, output_ptr, output_len)?; diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index fc6d8ae79c57d..089230bc9bfdf 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -155,7 +155,7 @@ pub fn clear_prefix( /// Get a Vec of bytes from storage. pub fn get_raw(key: &[u8]) -> Option> { - sp_io::storage::get(key) + sp_io::storage::get(key).map(|value| value.to_vec()) } /// Put a raw byte slice into storage. 
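The `get_raw` change above is the caller-facing half of the double-allocation fix: `sp_io::storage::get` now returns a `bytes::Bytes` that wraps the buffer handed over by the host exactly once, and callers that still need ownership convert explicitly. A hedged sketch of both consumption styles (the key and the `u32` payload are illustrative):

if let Some(value) = sp_io::storage::get(b"some_key") {
    // Cheap path: `Bytes` clones are reference-counted, and
    // `codec::decode_from_bytes` decodes straight out of the shared buffer.
    let as_u32: Result<u32, _> = codec::decode_from_bytes(value.clone());
    // Compatibility path: materialise an owned `Vec<u8>`, as `get_raw` now does.
    let raw: Vec<u8> = value.to_vec();
    let _ = (as_u32, raw);
}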
diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index e1c1b32d65c39..45cdfad67b8ae 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 273 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -47,8 +47,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Cow<'a, T> Rc Vec - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - and 2 others + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 273 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -122,8 +122,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Cow<'a, T> Rc Vec - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - and 2 others + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 7d8c448f00193..d7441e8b18562 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 273 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -47,8 +47,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Cow<'a, T> Rc Vec - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - and 2 others + 
bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 273 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -122,8 +122,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Cow<'a, T> Rc Vec - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - and 2 others + bytes::bytes::Bytes + and 3 others = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 77ba4be677f31..fda6b7fdd11dd 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -15,7 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +bytes = { version = "1.1.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.1.3", default-features = false, features = ["bytes"] } hash-db = { version = "0.15.2", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-keystore = { version = "0.12.0", default-features = false, optional = true, path = "../keystore" } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index edffc37351147..9bf9345e594c3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -125,8 +125,8 @@ impl From for KillStorageResult { #[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. 
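// Illustrative aside (not part of this patch): the switch to `bytes::Bytes`
// below pays off because `Bytes` is a reference-counted buffer, so once a
// value is wrapped in it, further hand-offs are cheap views rather than
// full copies:
//
//     use bytes::Bytes;
//
//     let vec = vec![0u8; 16 * 1024];
//     let bytes = Bytes::from(vec); // takes ownership of the allocation, no copy
//     let view = bytes.clone();     // refcount bump only, no 16 KiB copy
//     assert_eq!(view.len(), 16 * 1024);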
-	fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
-		self.storage(key).map(|s| s.to_vec())
+	fn get(&self, key: &[u8]) -> Option<bytes::Bytes> {
+		self.storage(key).map(|s| bytes::Bytes::from(s.to_vec()))
 	}
 
 	/// Get `key` from storage, placing the value into `value_out` and return the number of
@@ -1787,7 +1787,7 @@ mod tests {
 		t.execute_with(|| {
 			assert_eq!(storage::get(b"hello"), None);
 			storage::set(b"hello", b"world");
-			assert_eq!(storage::get(b"hello"), Some(b"world".to_vec()));
+			assert_eq!(storage::get(b"hello"), Some(b"world".to_vec().into()));
 			assert_eq!(storage::get(b"foo"), None);
 			storage::set(b"foo", &[1, 2, 3][..]);
 		});
@@ -1799,7 +1799,7 @@ mod tests {
 
 		t.execute_with(|| {
 			assert_eq!(storage::get(b"hello"), None);
-			assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec()));
+			assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec().into()));
 		});
 
 		let value = vec![7u8; 35];
@@ -1809,7 +1809,7 @@ mod tests {
 
 		t.execute_with(|| {
 			assert_eq!(storage::get(b"hello"), None);
-			assert_eq!(storage::get(b"foo00"), Some(value.clone()));
+			assert_eq!(storage::get(b"foo00"), Some(value.clone().into()));
 		});
 	}
diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml
index 8cd31bab559ea..a657c98381c9a 100644
--- a/primitives/runtime-interface/Cargo.toml
+++ b/primitives/runtime-interface/Cargo.toml
@@ -14,12 +14,13 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
+bytes = { version = "1.1.0", default-features = false }
 sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-features = false }
 sp-std = { version = "4.0.0", default-features = false, path = "../std" }
 sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" }
 sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" }
 sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" }
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["bytes"] }
 static_assertions = "1.0.0"
 primitive-types = { version = "0.11.1", default-features = false }
 sp-storage = { version = "6.0.0", default-features = false, path = "../storage" }
diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs
index 5d895ff5b3f82..ac6f0def9cad0 100644
--- a/primitives/runtime-interface/src/pass_by.rs
+++ b/primitives/runtime-interface/src/pass_by.rs
@@ -248,12 +248,12 @@ impl<T: codec::Codec> PassByImpl<T> for Codec<T> {
 		let len = len as usize;
 		let encoded = if len == 0 {
-			Vec::new()
+			bytes::Bytes::new()
 		} else {
-			unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }
+			bytes::Bytes::from(unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) })
 		};
 
-		T::decode(&mut &encoded[..]).expect("Host to wasm values are encoded correctly; qed")
+		codec::decode_from_bytes(encoded).expect("Host to wasm values are encoded correctly; qed")
 	}
 }
diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml
index f0e78e0e536b9..e9b2937227db6 100644
--- a/primitives/runtime-interface/test-wasm/Cargo.toml
+++ b/primitives/runtime-interface/test-wasm/Cargo.toml
@@ -13,6 +13,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
+bytes = { version = "1.1.0", default-features = false }
 sp-core = { version = "6.0.0", default-features = false, path = "../../core" }
 sp-io = { version = "6.0.0", default-features = false, path = "../../io" }
"../../io" } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../" } diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index f518a2e17498c..1305aef66cacc 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -60,6 +60,18 @@ pub trait TestApi { vec![0; 4 * 1024] } + fn return_option_vec() -> Option> { + let mut vec = Vec::new(); + vec.resize(16 * 1024, 0xAA); + Some(vec) + } + + fn return_option_bytes() -> Option { + let mut vec = Vec::new(); + vec.resize(16 * 1024, 0xAA); + Some(vec.into()) + } + /// Set the storage at key with value. fn set_storage(&mut self, key: &[u8], data: &[u8]) { self.place_storage(key.to_vec(), Some(data.to_vec())); @@ -300,4 +312,12 @@ wasm_export_functions! { assert_eq!(c, res.2); assert_eq!(d, res.3); } + + fn test_return_option_vec() { + test_api::return_option_vec(); + } + + fn test_return_option_bytes() { + test_api::return_option_bytes(); + } } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 1ab8dbfbbff22..d1db3e064295e 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -23,7 +23,7 @@ use sp_runtime_interface::*; use sp_runtime_interface_test_wasm::{test_api::HostFunctions, wasm_binary_unwrap}; use sp_runtime_interface_test_wasm_deprecated::wasm_binary_unwrap as wasm_binary_deprecated_unwrap; -use sc_executor_common::runtime_blob::RuntimeBlob; +use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::AllocationStats}; use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions as HostFunctionsT}; use std::{ @@ -36,7 +36,7 @@ type TestExternalities = sp_state_machine::TestExternalities( binary: &[u8], method: &str, -) -> Result { +) -> (Result, Option) { let mut ext = TestExternalities::default(); let mut ext_ext = ext.ext(); @@ -44,20 +44,21 @@ fn call_wasm_method_with_result( ExtendedHostFunctions, >::new(sc_executor::WasmExecutionMethod::Interpreted, Some(8), 8, None, 2); - executor - .uncached_call( - RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), - &mut ext_ext, - false, - method, - &[], - ) - .map_err(|e| format!("Failed to execute `{}`: {}", method, e))?; - Ok(ext) + let (result, allocation_stats) = executor.uncached_call_with_allocation_stats( + RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), + &mut ext_ext, + false, + method, + &[], + ); + let result = result + .map_err(|e| format!("Failed to execute `{}`: {}", method, e)) + .map(|_| ext); + (result, allocation_stats) } fn call_wasm_method(binary: &[u8], method: &str) -> TestExternalities { - call_wasm_method_with_result::(binary, method).unwrap() + call_wasm_method_with_result::(binary, method).0.unwrap() } #[test] @@ -103,8 +104,9 @@ fn test_return_input_public_key() { #[test] fn host_function_not_found() { - let err = - call_wasm_method_with_result::<()>(wasm_binary_unwrap(), "test_return_data").unwrap_err(); + let err = call_wasm_method_with_result::<()>(wasm_binary_unwrap(), "test_return_data") + .0 + .unwrap_err(); assert!(err.contains("Instantiation: Export ")); assert!(err.contains(" not found")); @@ -236,3 +238,43 @@ fn test_tracing() { fn test_return_input_as_tuple() { call_wasm_method::(wasm_binary_unwrap(), "test_return_input_as_tuple"); } + +#[test] +fn test_returning_option_bytes_from_a_host_function_is_efficient() { + let (result, 
+		wasm_binary_unwrap(),
+		"test_return_option_vec",
+	);
+	result.unwrap();
+	let (result, stats_bytes) = call_wasm_method_with_result::<HostFunctions>(
+		wasm_binary_unwrap(),
+		"test_return_option_bytes",
+	);
+	result.unwrap();
+
+	let stats_vec = stats_vec.unwrap();
+	let stats_bytes = stats_bytes.unwrap();
+
+	// The way we currently implement marshaling of `Option<Vec<u8>>` through
+	// the WASM FFI boundary from the host to the runtime requires that it is
+	// marshaled through SCALE. This is quite inefficient as it requires two
+	// memory allocations inside of the runtime:
+	//
+	// 1) the first allocation to copy the SCALE-encoded blob into the runtime;
+	// 2) and another allocation for the resulting `Vec<u8>` when decoding that blob.
+	//
+	// Both of these allocations are as big as the `Vec<u8>` which is being
+	// passed to the runtime. This is especially bad when fetching big values
+	// from storage, as it can lead to an out-of-memory situation.
+	//
+	// Our `Option<Bytes>` marshaling is better; it still must go through SCALE,
+	// and it still requires two allocations, however since `Bytes` is zero-copy
+	// only the first allocation is `Vec<u8>`-sized, and the second allocation
+	// which creates the deserialized `Bytes` is tiny, and is only necessary because
+	// the underlying `Bytes` buffer from which we're deserializing gets automatically
+	// turned into an `Arc`.
+	//
+	// So this assertion tests that deserializing `Option<Bytes>` allocates less than
+	// deserializing `Option<Vec<u8>>`.
+	assert_eq!(stats_bytes.bytes_allocated_sum + 16 * 1024 + 8, stats_vec.bytes_allocated_sum);
+}

From 479c84cacee0009eb5632754295c039bf96f706d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Fri, 29 Jul 2022 10:22:49 +0100
Subject: [PATCH 079/135] construct_runtime!: Support parsing `struct Runtime`
 (#11932)

* construct_runtime!: Support parsing `struct Runtime`

* FMT
---
 bin/node-template/runtime/src/lib.rs                    | 5 +++--
 frame/balances/src/tests_local.rs                       | 2 +-
 frame/support/procedural/src/construct_runtime/parse.rs | 9 ++++++++-
 3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs
index a2f595f571a90..a4382cc9038ce 100644
--- a/bin/node-template/runtime/src/lib.rs
+++ b/bin/node-template/runtime/src/lib.rs
@@ -272,10 +272,11 @@ impl pallet_template::Config for Runtime {
 
 // Create the runtime by composing the FRAME pallets that were previously configured.
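// Illustration only (not part of the diff): after this patch the macro accepts
// either spelling; existing runtimes declaring `pub enum Runtime where ...`
// keep compiling, while new ones may equivalently write:
//
//     construct_runtime!(
//         pub struct Runtime
//         where
//             Block = Block,
//             NodeBlock = opaque::Block,
//             UncheckedExtrinsic = UncheckedExtrinsic,
//         {
//             System: frame_system,
//         }
//     );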
 construct_runtime!(
-	pub enum Runtime where
+	pub struct Runtime
+	where
 		Block = Block,
 		NodeBlock = opaque::Block,
-		UncheckedExtrinsic = UncheckedExtrinsic
+		UncheckedExtrinsic = UncheckedExtrinsic,
 	{
 		System: frame_system,
 		RandomnessCollectiveFlip: pallet_randomness_collective_flip,
diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs
index 6f4c50d90153a..48c6574c3b39f 100644
--- a/frame/balances/src/tests_local.rs
+++ b/frame/balances/src/tests_local.rs
@@ -34,7 +34,7 @@ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
 
 frame_support::construct_runtime!(
-	pub enum Test where
+	pub struct Test where
 		Block = Block,
 		NodeBlock = Block,
 		UncheckedExtrinsic = UncheckedExtrinsic,
diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs
index a2cda6a0777b8..711da85c10cfc 100644
--- a/frame/support/procedural/src/construct_runtime/parse.rs
+++ b/frame/support/procedural/src/construct_runtime/parse.rs
@@ -73,7 +73,14 @@ pub struct ExplicitRuntimeDeclaration {
 impl Parse for RuntimeDeclaration {
 	fn parse(input: ParseStream) -> Result<Self> {
 		input.parse::<Token![pub]>()?;
-		input.parse::<Token![enum]>()?;
+
+		// Support either `enum` or `struct`.
+		if input.peek(Token![struct]) {
+			input.parse::<Token![struct]>()?;
+		} else {
+			input.parse::<Token![enum]>()?;
+		}
+
 		let name = input.parse::<Ident>()?;
 		let where_section = input.parse()?;
 		let pallets =

From 47c19a570d4703732541decf97e22e3b2b8881d9 Mon Sep 17 00:00:00 2001
From: yjh
Date: Fri, 29 Jul 2022 17:32:31 +0800
Subject: [PATCH 080/135] feat: add propose method for SimpleSlotWorker (#11692)

* feat: add propose method for SimpleSlotWorker

* remove param slot

* improve code

* fmt
---
 client/consensus/slots/src/lib.rs | 117 ++++++++++++++++++------------
 1 file changed, 69 insertions(+), 48 deletions(-)

diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs
index a6fbc4bebc796..7c3de13444c1a 100644
--- a/client/consensus/slots/src/lib.rs
+++ b/client/consensus/slots/src/lib.rs
@@ -38,7 +38,7 @@ use log::{debug, info, warn};
 use sc_consensus::{BlockImport, JustificationSyncLink};
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN};
 use sp_arithmetic::traits::BaseArithmetic;
-use sp_consensus::{CanAuthorWith, Proposer, SelectChain, SyncOracle};
+use sp_consensus::{CanAuthorWith, Proposal, Proposer, SelectChain, SyncOracle};
 use sp_consensus_slots::{Slot, SlotDuration};
 use sp_inherents::CreateInherentDataProviders;
 use sp_runtime::{
@@ -103,7 +103,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
 	type Proposer: Proposer<B> + Send;
 
 	/// Data associated with a slot claim.
-	type Claim: Send + 'static;
+	type Claim: Send + Sync + 'static;
 
 	/// Epoch data necessary for authoring.
 	type EpochData: Send + Sync + 'static;
@@ -183,6 +183,70 @@ pub trait SimpleSlotWorker<B: BlockT> {
 	/// Remaining duration for proposing.
 	fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> Duration;
 
+	/// Propose a block by `Proposer`.
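// Sketch of the deadline pattern used by `propose` below (hypothetical helper,
// not part of the patch): race the proposal future against a timer and give
// up if the timer wins.
//
//     use futures::future::{self, Either};
//     use futures_timer::Delay;
//     use std::time::Duration;
//
//     async fn with_deadline<F>(fut: F, deadline: Duration) -> Option<F::Output>
//     where
//         F: std::future::Future + Unpin,
//     {
//         match future::select(fut, Delay::new(deadline)).await {
//             Either::Left((out, _)) => Some(out),
//             Either::Right(_) => None, // took too long, discard the work
//         }
//     }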
+	async fn propose(
+		&mut self,
+		proposer: Self::Proposer,
+		claim: &Self::Claim,
+		slot_info: SlotInfo<B>,
+		proposing_remaining: Delay,
+	) -> Option<
+		Proposal<
+			B,
+			<Self::Proposer as Proposer<B>>::Transaction,
+			<Self::Proposer as Proposer<B>>::Proof,
+		>,
+	> {
+		let slot = slot_info.slot;
+		let telemetry = self.telemetry();
+		let logging_target = self.logging_target();
+		let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info);
+		let logs = self.pre_digest_data(slot, claim);
+
+		// deadline our production to 98% of the total time left for proposing. As we deadline
+		// the proposing below to the same total time left, the 2% margin should be enough for
+		// the result to be returned.
+		let proposing = proposer
+			.propose(
+				slot_info.inherent_data,
+				sp_runtime::generic::Digest { logs },
+				proposing_remaining_duration.mul_f32(0.98),
+				None,
+			)
+			.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));
+
+		let proposal = match futures::future::select(proposing, proposing_remaining).await {
+			Either::Left((Ok(p), _)) => p,
+			Either::Left((Err(err), _)) => {
+				warn!(target: logging_target, "Proposing failed: {}", err);
+
+				return None
+			},
+			Either::Right(_) => {
+				info!(
+					target: logging_target,
+					"⌛️ Discarding proposal for slot {}; block production took too long", slot,
+				);
+				// If the node was compiled with debug, tell the user to use release optimizations.
+				#[cfg(build_type = "debug")]
+				info!(
+					target: logging_target,
+					"👉 Recompile your node in `--release` mode to mitigate this problem.",
+				);
+				telemetry!(
+					telemetry;
+					CONSENSUS_INFO;
+					"slots.discarding_proposal_took_too_long";
+					"slot" => *slot,
+				);
+
+				return None
+			},
+		};
+
+		Some(proposal)
+	}
+
 	/// Implements [`SlotWorker::on_slot`].
 	async fn on_slot(
 		&mut self,
@@ -256,10 +320,8 @@ pub trait SimpleSlotWorker<B: BlockT> {
 		}
 
 		debug!(
-			target: self.logging_target(),
-			"Starting authorship at slot {}; timestamp = {}",
-			slot,
-			*timestamp,
+			target: logging_target,
+			"Starting authorship at slot {}; timestamp = {}", slot, *timestamp,
 		);
 
 		telemetry!(
@@ -287,48 +349,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
 			},
 		};
 
-		let logs = self.pre_digest_data(slot, &claim);
-
-		// deadline our production to 98% of the total time left for proposing. As we deadline
-		// the proposing below to the same total time left, the 2% margin should be enough for
-		// the result to be returned.
-		let proposing = proposer
-			.propose(
-				slot_info.inherent_data,
-				sp_runtime::generic::Digest { logs },
-				proposing_remaining_duration.mul_f32(0.98),
-				None,
-			)
-			.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));
-
-		let proposal = match futures::future::select(proposing, proposing_remaining).await {
-			Either::Left((Ok(p), _)) => p,
-			Either::Left((Err(err), _)) => {
-				warn!(target: logging_target, "Proposing failed: {}", err);
-
-				return None
-			},
-			Either::Right(_) => {
-				info!(
-					target: logging_target,
-					"⌛️ Discarding proposal for slot {}; block production took too long", slot,
-				);
-				// If the node was compiled with debug, tell the user to use release optimizations.
-				#[cfg(build_type = "debug")]
-				info!(
-					target: logging_target,
-					"👉 Recompile your node in `--release` mode to mitigate this problem.",
-				);
-				telemetry!(
-					telemetry;
-					CONSENSUS_INFO;
-					"slots.discarding_proposal_took_too_long";
-					"slot" => *slot,
-				);
-
-				return None
-			},
-		};
+		let proposal = self.propose(proposer, &claim, slot_info, proposing_remaining).await?;
 
 		let (block, storage_proof) = (proposal.block, proposal.proof);
 		let (header, body) = block.deconstruct();

From bafd81148ca71faf73abfdb124430de1c994abc7 Mon Sep 17 00:00:00 2001
From: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
Date: Fri, 29 Jul 2022 12:56:08 +0200
Subject: [PATCH 081/135] Require Alliance Initialisation Before Joining
 (#11917)

* require alliance initialisation before joining

* use noop

* make one definition of initialization

* rename event

* add todo comment

* update doc
---
 frame/alliance/src/lib.rs  | 34 +++++++++++++++++++++++++++-------
 frame/alliance/src/mock.rs |  8 +++++++-
 2 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs
index f9e85e270af16..111ea5dc6e507 100644
--- a/frame/alliance/src/lib.rs
+++ b/frame/alliance/src/lib.rs
@@ -311,7 +311,9 @@ pub mod pallet {
 	#[pallet::error]
 	pub enum Error<T, I = ()> {
 		/// The founders/fellows/allies have already been initialized.
-		MembersAlreadyInitialized,
+		AllianceAlreadyInitialized,
+		/// The Alliance has not been initialized yet, therefore accounts cannot join it.
+		AllianceNotYetInitialized,
 		/// Account is already a member.
 		AlreadyMember,
 		/// Account is not a member.
@@ -434,6 +436,11 @@ pub mod pallet {
 			Members::<T, I>::insert(MemberRole::Fellow, members);
 		}
 		if !self.allies.is_empty() {
+			// Only allow Allies if the Alliance is "initialized".
+			assert!(
+				Pallet::<T, I>::is_initialized(),
+				"Alliance must have Founders or Fellows to have Allies"
+			);
 			let members: BoundedVec<T::AccountId, T::MaxMembersCount> =
 				self.allies.clone().try_into().expect("Too many genesis allies");
 			Members::<T, I>::insert(MemberRole::Ally, members);
@@ -612,6 +619,11 @@ pub mod pallet {
 		) -> DispatchResult {
 			ensure_root(origin)?;
 
+			// Cannot be called if the Alliance already has Founders or Fellows.
+			// TODO: Remove check and allow Root to set members at any time.
+			// https://github.com/paritytech/substrate/issues/11928
+			ensure!(!Self::is_initialized(), Error::<T, I>::AllianceAlreadyInitialized);
+
 			let mut founders: BoundedVec<T::AccountId, T::MaxMembersCount> =
 				founders.try_into().map_err(|_| Error::<T, I>::TooManyMembers)?;
 			let mut fellows: BoundedVec<T::AccountId, T::MaxMembersCount> =
 				fellows.try_into().map_err(|_| Error::<T, I>::TooManyMembers)?;
 			let mut allies: BoundedVec<T::AccountId, T::MaxMembersCount> =
 				allies.try_into().map_err(|_| Error::<T, I>::TooManyMembers)?;
 
-			ensure!(
-				!Self::has_member(MemberRole::Founder) &&
-					!Self::has_member(MemberRole::Fellow) &&
-					!Self::has_member(MemberRole::Ally),
-				Error::<T, I>::MembersAlreadyInitialized
-			);
 			for member in founders.iter().chain(fellows.iter()).chain(allies.iter()) {
 				Self::has_identity(member)?;
 			}
@@ -705,6 +711,15 @@ pub mod pallet {
 		pub fn join_alliance(origin: OriginFor<T>) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 
+			// We don't want anyone to join as an Ally before the Alliance has been initialized via
+			// Root call. The reasons are two-fold:
+			//
+			// 1. There is no `Rule` or admission criteria, so the joiner would be an ally to
+			//    nought, and
+			// 2. It adds complexity to the initialization, namely deciding to overwrite accounts
+			//    that already joined as an Ally.
+			ensure!(Self::is_initialized(), Error::<T, I>::AllianceNotYetInitialized);
+
 			// Unscrupulous accounts are non grata.
 			ensure!(!Self::is_unscrupulous_account(&who), Error::<T, I>::AccountNonGrata);
 			ensure!(!Self::is_member(&who), Error::<T, I>::AlreadyMember);
@@ -867,6 +882,11 @@ pub mod pallet {
 	}
 
 	impl<T: Config<I>, I: 'static> Pallet<T, I> {
+		/// Check if the Alliance has been initialized.
+		fn is_initialized() -> bool {
+			Self::has_member(MemberRole::Founder) || Self::has_member(MemberRole::Fellow)
+		}
+
 		/// Check if a given role has any members.
 		fn has_member(role: MemberRole) -> bool {
 			Members::<T, I>::decode_len(role).unwrap_or_default() > 0
diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs
index d6e9a92a10dec..91986300aa2e1 100644
--- a/frame/alliance/src/mock.rs
+++ b/frame/alliance/src/mock.rs
@@ -26,7 +26,7 @@ pub use sp_runtime::{
 use sp_std::convert::{TryFrom, TryInto};
 
 pub use frame_support::{
-	assert_ok, ord_parameter_types, parameter_types,
+	assert_noop, assert_ok, ord_parameter_types, parameter_types,
 	traits::{EitherOfDiverse, GenesisBuild, SortedMembers},
 	BoundedVec,
 };
@@ -291,6 +291,12 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 5, Judgement::KnownGood));
 		assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone())));
 
+		// Joining before init should fail.
+		assert_noop!(
+			Alliance::join_alliance(Origin::signed(1)),
+			Error::<Test>::AllianceNotYetInitialized
+		);
+
 		assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![]));
 
 		System::set_block_number(1);

From e0d7147c672afe45c47e82d1ad2ce3b8fc733984 Mon Sep 17 00:00:00 2001
From: Mak
Date: Fri, 29 Jul 2022 15:16:30 +0300
Subject: [PATCH 082/135] Integrate automatic update of substrate-node-template
 (#11931)

* Integrate automatic update of substrate-node-template

* Update scripts/ci/gitlab/pipeline/publish.yml

Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>

Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
---
 scripts/ci/gitlab/pipeline/publish.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml
index c02b50e0cbc1d..8f7a619f8b196 100644
--- a/scripts/ci/gitlab/pipeline/publish.yml
+++ b/scripts/ci/gitlab/pipeline/publish.yml
@@ -147,3 +147,17 @@ publish-draft-release:
   script:
     - ./scripts/ci/gitlab/publish_draft_release.sh
   allow_failure: true
+
+# Ref: https://github.com/paritytech/opstooling/issues/111
+update-node-template:
+  stage: publish
+  extends: .kubernetes-env
+  rules:
+    - if: $CI_COMMIT_REF_NAME =~ /^polkadot-v[0-9]+\.[0-9]+.*$/ # i.e. polkadot-v1.0.99, polkadot-v2.1rc1
+  script:
+    - git clone --depth=1 --branch="$PIPELINE_SCRIPTS_TAG" https://github.com/paritytech/pipeline-scripts
+    - ./pipeline-scripts/update_substrate_template.sh
+      --repo-name "substrate-node-template"
+      --template-path "bin/node-template"
+      --github-api-token "$GITHUB_TOKEN"
+      --polkadot-branch "$CI_COMMIT_REF_NAME"

From 969a344684d9b79b6bb6bab591ac73df4017fea9 Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 29 Jul 2022 15:36:48 +0300
Subject: [PATCH 083/135] Always allocate slots for reserved nodes (#11909)

* Always allocate slots for reserved nodes

* minor: replace no-slot peer counter with a set
---
 client/network/src/protocol.rs | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs
index 3698a6b936ed5..1c933fabcbb5d 100644
--- a/client/network/src/protocol.rs
+++ b/client/network/src/protocol.rs
@@ -179,6 +179,10 @@ pub struct Protocol<B: BlockT, Client> {
 	/// List of nodes for which we perform additional logging because they are important for the
 	/// user.
 	important_peers: HashSet<PeerId>,
+	/// List of nodes that should never occupy peer slots.
+	default_peers_set_no_slot_peers: HashSet<PeerId>,
+	/// Actual list of connected no-slot nodes.
+	default_peers_set_no_slot_connected_peers: HashSet<PeerId>,
 	/// Value that was passed as part of the configuration. Used to cap the number of full nodes.
 	default_peers_set_num_full: usize,
 	/// Number of slots to allocate to light nodes.
@@ -304,6 +308,17 @@ where
 			imp_p
 		};
 
+		let default_peers_set_no_slot_peers = {
+			let mut no_slot_p: HashSet<PeerId> = network_config
+				.default_peers_set
+				.reserved_nodes
+				.iter()
+				.map(|reserved| reserved.peer_id)
+				.collect();
+			no_slot_p.shrink_to_fit();
+			no_slot_p
+		};
+
 		let mut known_addresses = Vec::new();
 
 		let (peerset, peerset_handle) = {
@@ -404,6 +419,8 @@ where
 			genesis_hash: info.genesis_hash,
 			chain_sync,
 			important_peers,
+			default_peers_set_no_slot_peers,
+			default_peers_set_no_slot_connected_peers: HashSet::new(),
 			default_peers_set_num_full: network_config.default_peers_set_num_full as usize,
 			default_peers_set_num_light: {
 				let total = network_config.default_peers_set.out_peers +
@@ -542,6 +559,7 @@ where
 				self.pending_messages
 					.push_back(CustomMessageOutcome::BlockImport(origin, blocks));
 			}
+			self.default_peers_set_no_slot_connected_peers.remove(&peer);
 			Ok(())
 		} else {
 			Err(())
@@ -723,7 +741,14 @@ where
 			}
 		}
 
-		if status.roles.is_full() && self.chain_sync.num_peers() >= self.default_peers_set_num_full
+		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who);
+		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+
+		if status.roles.is_full() &&
+			self.chain_sync.num_peers() >=
+				self.default_peers_set_num_full +
+					self.default_peers_set_no_slot_connected_peers.len() +
+					this_peer_reserved_slot
 		{
 			debug!(target: "sync", "Too many full nodes, rejecting {}", who);
 			self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
@@ -767,6 +792,9 @@ where
 		debug!(target: "sync", "Connected {}", who);
 
 		self.peers.insert(who, peer);
+		if no_slot_peer {
+			self.default_peers_set_no_slot_connected_peers.insert(who);
+		}
 		self.pending_messages
 			.push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number));

From 494a2883e14a389e1c090475077d0c745fefc6c3 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Fri, 29 Jul 2022 18:47:21 +0300
Subject: [PATCH 084/135] Lean BEEFY to Full BEEFY - don't skip (older)
 mandatory blocks and import justifications (#11821)

* client/beefy: don't accept vote for older rounds
* client/beefy: clean up and reorg the worker struct

* client/beefy: first step towards Full BEEFY

The first step from Lean->Full BEEFY is to have the worker enforce
uninterrupted line of BEEFY finalized mandatory blocks. There is one
mandatory block per session (the first block in the session).

As such, votes processing and votes generation now enforces that all
mandatory blocks are finalized in strict monotonically increasing
sequence and no block 'N' will be worked on if there is any GRANDPA
finalized but BEEFY non-final mandatory block 'M', where 'M < N'.

Implementation details:
- Introduced 'VoterOracle' to separate the voting decisions logic,
  and track new/pending sessions.
- New sessions get queued up with the worker operating either:
  1. up-to-date - all mandatory blocks leading up to current GRANDPA
     finalized: queue has ONE element, the 'current session' where
     `mandatory_done == true`,
  2. lagging behind GRANDPA: queue has [1, N] elements, where all
     `mandatory_done == false`. In this state, every time a session gets
     its mandatory block BEEFY finalized, the session is popped off the
     queue, eventually getting to operating mode `1. up-to-date`.
- Votes get triaged and those that fall within the `VoterOracle` allowed
  window get processed; the others get dropped if stale, or buffered for
  later processing (when they reach the window).
- Worker general code was also updated to fall in one of two roles:
  1. react to external events and change internal 'state',
  2. generate events/votes based on internal 'state'.

Signed-off-by: acatangiu

* client/beefy: sketch idea for block import and sync

Signed-off-by: acatangiu

* client/beefy: add BEEFY block import

* client/beefy: process justifications from block import

* client/beefy: add TODOs for sync protocol

* client/beefy: add more docs and comments

* client/beefy-rpc: fix RPC error

* client/beefy: verify justification validity on block import

* client/beefy: more tests

* client/beefy: small fixes

- first handle and note the self vote before gossiping it,
- don't shortcircuit on err when processing pending votes.

* client/beefy: remove invalid justifications at block import

* todo: beefy block import tests

* RFC: ideas for multiple justifications per block

* Revert "RFC: ideas for multiple justifications per block"

This reverts commit 8256fb07d3124db69daf252720b3c0208202624d.
* client/beefy: append justif to backend on block import * client/beefy: groundwork for block import test * client/beefy: groundwork2 for block import test * client/beefy: groundwork3 for block import test * client/beefy: add block import test * client/beefy: add required trait bounds to block import builder * remove client from beefy block import, backend gets the job done Signed-off-by: acatangiu --- Cargo.lock | 2 + client/beefy/Cargo.toml | 4 +- client/beefy/rpc/src/lib.rs | 5 +- client/beefy/rpc/src/notification.rs | 2 +- client/beefy/src/error.rs | 4 + client/beefy/src/import.rs | 205 ++++++ client/beefy/src/justification.rs | 177 +++++ client/beefy/src/lib.rs | 101 ++- client/beefy/src/metrics.rs | 10 +- client/beefy/src/notification.rs | 6 +- client/beefy/src/round.rs | 82 ++- client/beefy/src/tests.rs | 199 +++++- client/beefy/src/worker.rs | 932 +++++++++++++++++---------- client/network/test/src/lib.rs | 15 +- primitives/beefy/src/commitment.rs | 6 + primitives/runtime/src/lib.rs | 5 + 16 files changed, 1344 insertions(+), 411 deletions(-) create mode 100644 client/beefy/src/import.rs create mode 100644 client/beefy/src/justification.rs diff --git a/Cargo.lock b/Cargo.lock index 926d6d4cdf19a..34bf34c62c6fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -440,6 +440,7 @@ dependencies = [ name = "beefy-gadget" version = "4.0.0-dev" dependencies = [ + "async-trait", "beefy-primitives", "fnv", "futures", @@ -448,6 +449,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.0", + "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-consensus", diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index b910f7ce307a5..e219420959c9f 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -9,6 +9,7 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3" @@ -22,6 +23,7 @@ beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } prometheus = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-chain-spec = { version = "4.0.0-dev", path = "../../client/chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } @@ -42,7 +44,7 @@ serde = "1.0.136" strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" tokio = "1.17.0" -sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network-test = { version = "0.8.0", path = "../network/test" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } sp-keyring = { version = "6.0.0", path = "../../primitives/keyring" } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 2c3ffda056c26..13bca08d37429 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -164,8 +164,9 @@ where mod tests { use super::*; - use beefy_gadget::notification::{ - BeefyBestBlockStream, BeefySignedCommitment, BeefySignedCommitmentSender, + use beefy_gadget::{ + justification::BeefySignedCommitment, + 
 		notification::{BeefyBestBlockStream, BeefySignedCommitmentSender},
 	};
 	use beefy_primitives::{known_payload_ids, Payload};
 	use codec::{Decode, Encode};
diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs
index 2f58c7c6bb5dc..cdda667782dd5 100644
--- a/client/beefy/rpc/src/notification.rs
+++ b/client/beefy/rpc/src/notification.rs
@@ -29,7 +29,7 @@ pub struct EncodedSignedCommitment(sp_core::Bytes);
 impl EncodedSignedCommitment {
 	pub fn new<Block>(
-		signed_commitment: beefy_gadget::notification::BeefySignedCommitment<Block>,
+		signed_commitment: beefy_gadget::justification::BeefySignedCommitment<Block>,
 	) -> Self
 	where
 		Block: BlockT,
diff --git a/client/beefy/src/error.rs b/client/beefy/src/error.rs
index eacadeb7613a5..dd5fd649d52ce 100644
--- a/client/beefy/src/error.rs
+++ b/client/beefy/src/error.rs
@@ -24,8 +24,12 @@ use std::fmt::Debug;
 
 #[derive(Debug, thiserror::Error, PartialEq)]
 pub enum Error {
+	#[error("Backend: {0}")]
+	Backend(String),
 	#[error("Keystore error: {0}")]
 	Keystore(String),
 	#[error("Signature error: {0}")]
 	Signature(String),
+	#[error("Session uninitialized")]
+	UninitSession,
 }
diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs
new file mode 100644
index 0000000000000..7caeb49db5e50
--- /dev/null
+++ b/client/beefy/src/import.rs
@@ -0,0 +1,205 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use beefy_primitives::{crypto::Signature, BeefyApi, VersionedFinalityProof, BEEFY_ENGINE_ID};
+use codec::Encode;
+use log::error;
+use std::{collections::HashMap, sync::Arc};
+
+use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_blockchain::{well_known_cache_keys, HeaderBackend};
+use sp_consensus::Error as ConsensusError;
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+	EncodedJustification,
+};
+
+use sc_client_api::backend::Backend;
+use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult};
+
+use crate::{
+	justification::decode_and_verify_commitment, notification::BeefySignedCommitmentSender,
+};
+
+/// A block-import handler for BEEFY.
+///
+/// This scans each imported block for BEEFY justifications and verifies them.
+/// Wraps a `inner: BlockImport` and ultimately defers to it.
+///
+/// When using BEEFY, the block import worker should be using this block import object.
+pub struct BeefyBlockImport<Block: BlockT, BE, Runtime, I> {
+	backend: Arc<BE>,
+	runtime: Arc<Runtime>,
+	inner: I,
+	justification_sender: BeefySignedCommitmentSender<Block>,
+}
+
+impl<Block: BlockT, BE, Runtime, I: Clone> Clone for BeefyBlockImport<Block, BE, Runtime, I> {
+	fn clone(&self) -> Self {
+		BeefyBlockImport {
+			backend: self.backend.clone(),
+			runtime: self.runtime.clone(),
+			inner: self.inner.clone(),
+			justification_sender: self.justification_sender.clone(),
+		}
+	}
+}
+
+impl<Block: BlockT, BE, Runtime, I> BeefyBlockImport<Block, BE, Runtime, I> {
+	/// Create a new BeefyBlockImport.
+	pub fn new(
+		backend: Arc<BE>,
+		runtime: Arc<Runtime>,
+		inner: I,
+		justification_sender: BeefySignedCommitmentSender<Block>,
+	) -> BeefyBlockImport<Block, BE, Runtime, I> {
+		BeefyBlockImport { backend, runtime, inner, justification_sender }
+	}
+}
+
+impl<Block, BE, Runtime, I> BeefyBlockImport<Block, BE, Runtime, I>
+where
+	Block: BlockT,
+	BE: Backend<Block>,
+	Runtime: ProvideRuntimeApi<Block>,
+	Runtime::Api: BeefyApi<Block> + Send + Sync,
+{
+	fn decode_and_verify(
+		&self,
+		encoded: &EncodedJustification,
+		number: NumberFor<Block>,
+		hash: <Block as BlockT>::Hash,
+	) -> Result<VersionedFinalityProof<NumberFor<Block>, Signature>, ConsensusError> {
+		let block_id = BlockId::hash(hash);
+		let validator_set = self
+			.runtime
+			.runtime_api()
+			.validator_set(&block_id)
+			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
+			.ok_or_else(|| ConsensusError::ClientImport("Unknown validator set".to_string()))?;
+
+		decode_and_verify_commitment::<Block>(&encoded[..], number, &validator_set)
+	}
+
+	/// Import BEEFY justification: Send it to worker for processing and also append it to backend.
+	///
+	/// This function assumes:
+	/// - `justification` is verified and valid,
+	/// - the block referred by `justification` has been imported _and_ finalized.
+	fn import_beefy_justification_unchecked(
+		&self,
+		number: NumberFor<Block>,
+		justification: VersionedFinalityProof<NumberFor<Block>, Signature>,
+	) {
+		// Append the justification to the block in the backend.
+		if let Err(e) = self.backend.append_justification(
+			BlockId::Number(number),
+			(BEEFY_ENGINE_ID, justification.encode()),
+		) {
+			error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification);
+		}
+		// Send the justification to the BEEFY voter for processing.
+		match justification {
+			// TODO #11838: Should not unpack, these channels should also use
+			// `VersionedFinalityProof`.
+			VersionedFinalityProof::V1(signed_commitment) => self
+				.justification_sender
+				.notify(|| Ok::<_, ()>(signed_commitment))
+				.expect("forwards closure result; the closure always returns Ok; qed."),
+		};
+	}
+}
+
+#[async_trait::async_trait]
+impl<Block, BE, Runtime, I> BlockImport<Block> for BeefyBlockImport<Block, BE, Runtime, I>
+where
+	Block: BlockT,
+	BE: Backend<Block>,
+	I: BlockImport<
+			Block,
+			Error = ConsensusError,
+			Transaction = sp_api::TransactionFor<Runtime, Block>,
+		> + Send
+		+ Sync,
+	Runtime: ProvideRuntimeApi<Block> + Send + Sync,
+	Runtime::Api: BeefyApi<Block>,
+{
+	type Error = ConsensusError;
+	type Transaction = TransactionFor<Runtime, Block>;
+
+	async fn import_block(
+		&mut self,
+		mut block: BlockImportParams<Block, Self::Transaction>,
+		new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
+	) -> Result<ImportResult, Self::Error> {
+		let hash = block.post_hash();
+		let number = *block.header.number();
+
+		let beefy_proof = block
+			.justifications
+			.as_mut()
+			.and_then(|just| {
+				let decoded = just
+					.get(BEEFY_ENGINE_ID)
+					.map(|encoded| self.decode_and_verify(encoded, number, hash));
+				// Remove BEEFY justification from the list before giving to `inner`;
+				// we will append it to backend ourselves at the end if all goes well.
+				just.remove(BEEFY_ENGINE_ID);
+				decoded
+			})
+			.transpose()
+			.unwrap_or(None);
+
+		// Run inner block import.
+		let inner_import_result = self.inner.import_block(block, new_cache).await?;
+
+		match (beefy_proof, &inner_import_result) {
+			(Some(proof), ImportResult::Imported(_)) => {
+				let status = self.backend.blockchain().info();
+				if number <= status.finalized_number &&
+					Some(hash) ==
+						self.backend
+							.blockchain()
+							.hash(number)
+							.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
+				{
+					// The proof is valid and the block is imported and final, we can import.
+					self.import_beefy_justification_unchecked(number, proof);
+				} else {
+					error!(
+						target: "beefy",
+						"🥩 Cannot import justification: {:?} for, not yet final, block number {:?}",
+						proof,
+						number,
+					);
+				}
+			},
+			_ => (),
+		}
+
+		Ok(inner_import_result)
+	}
+
+	async fn check_block(
+		&mut self,
+		block: BlockCheckParams<Block>,
+	) -> Result<ImportResult, Self::Error> {
+		self.inner.check_block(block).await
+	}
+}
diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs
new file mode 100644
index 0000000000000..2a5191daec4b5
--- /dev/null
+++ b/client/beefy/src/justification.rs
@@ -0,0 +1,177 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::keystore::BeefyKeystore;
+use beefy_primitives::{
+	crypto::{AuthorityId, Signature},
+	ValidatorSet, VersionedFinalityProof,
+};
+use codec::{Decode, Encode};
+use sp_consensus::Error as ConsensusError;
+use sp_runtime::traits::{Block as BlockT, NumberFor};
+
+/// A commitment with matching BEEFY authorities' signatures.
+pub type BeefySignedCommitment<Block> =
+	beefy_primitives::SignedCommitment<NumberFor<Block>, beefy_primitives::crypto::Signature>;
+
+/// Decode and verify a Beefy SignedCommitment.
+pub(crate) fn decode_and_verify_commitment<Block: BlockT>(
+	encoded: &[u8],
+	target_number: NumberFor<Block>,
+	validator_set: &ValidatorSet<AuthorityId>,
+) -> Result<VersionedFinalityProof<NumberFor<Block>, Signature>, ConsensusError> {
+	let proof = <VersionedFinalityProof<NumberFor<Block>, Signature>>::decode(&mut &*encoded)
+		.map_err(|_| ConsensusError::InvalidJustification)?;
+	verify_with_validator_set::<Block>(target_number, validator_set, &proof).map(|_| proof)
+}
+
+/// Verify the Beefy finality proof against the validator set at the block it was generated.
+fn verify_with_validator_set<Block: BlockT>(
+	target_number: NumberFor<Block>,
+	validator_set: &ValidatorSet<AuthorityId>,
+	proof: &VersionedFinalityProof<NumberFor<Block>, Signature>,
+) -> Result<(), ConsensusError> {
+	match proof {
+		VersionedFinalityProof::V1(signed_commitment) => {
+			if signed_commitment.signatures.len() != validator_set.len() ||
+				signed_commitment.commitment.validator_set_id != validator_set.id() ||
+				signed_commitment.commitment.block_number != target_number
+			{
+				return Err(ConsensusError::InvalidJustification)
+			}
+
+			// Arrangement of signatures in the commitment should be in the same order
+			// as validators for that set.
+			let message = signed_commitment.commitment.encode();
+			let valid_signatures = validator_set
+				.validators()
+				.into_iter()
+				.zip(signed_commitment.signatures.iter())
+				.filter(|(id, signature)| {
+					signature
+						.as_ref()
+						.map(|sig| BeefyKeystore::verify(id, sig, &message[..]))
+						.unwrap_or(false)
+				})
+				.count();
+			if valid_signatures >= crate::round::threshold(validator_set.len()) {
+				Ok(())
+			} else {
+				Err(ConsensusError::InvalidJustification)
+			}
+		},
+	}
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+	use beefy_primitives::{known_payload_ids, Commitment, Payload, SignedCommitment};
+	use substrate_test_runtime_client::runtime::Block;
+
+	use super::*;
+	use crate::{keystore::tests::Keyring, tests::make_beefy_ids};
+
+	pub(crate) fn new_signed_commitment(
+		block_num: NumberFor<Block>,
+		validator_set: &ValidatorSet<AuthorityId>,
+		keys: &[Keyring],
+	) -> BeefySignedCommitment<Block> {
+		let commitment = Commitment {
+			payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]),
+			block_number: block_num,
+			validator_set_id: validator_set.id(),
+		};
+		let message = commitment.encode();
+		let signatures = keys.iter().map(|key| Some(key.sign(&message))).collect();
+		SignedCommitment { commitment, signatures }
+	}
+
+	#[test]
+	fn should_verify_with_validator_set() {
+		let keys = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie];
+		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
+
+		// build valid justification
+		let block_num = 42;
+		let proof = new_signed_commitment(block_num, &validator_set, keys);
+
+		let good_proof = proof.clone().into();
+		// should verify successfully
+		verify_with_validator_set::<Block>(block_num, &validator_set, &good_proof).unwrap();
+
+		// wrong block number -> should fail verification
+		let good_proof = proof.clone().into();
+		match verify_with_validator_set::<Block>(block_num + 1, &validator_set, &good_proof) {
+			Err(ConsensusError::InvalidJustification) => (),
+			_ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"),
+		};
+
+		// wrong validator set id -> should fail verification
+		let good_proof = proof.clone().into();
+		let other = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap();
+		match verify_with_validator_set::<Block>(block_num, &other, &good_proof) {
+			Err(ConsensusError::InvalidJustification) => (),
+			_ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"),
+		};
+
+		// wrong signatures length -> should fail verification
+		let mut bad_proof = proof.clone();
+		// change length of signatures
+		bad_proof.signatures.pop().flatten().unwrap();
+		match verify_with_validator_set::<Block>(block_num + 1, &validator_set, &bad_proof.into()) {
+			Err(ConsensusError::InvalidJustification) => (),
+			_ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"),
+		};
+
+		// not enough signatures -> should fail verification
+		let mut bad_proof = proof.clone();
+		// remove a signature (but same length)
+		*bad_proof.signatures.first_mut().unwrap() = None;
+		match verify_with_validator_set::<Block>(block_num + 1, &validator_set, &bad_proof.into()) {
+			Err(ConsensusError::InvalidJustification) => (),
+			_ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"),
+		};
+
+		// not enough _correct_ signatures -> should fail verification
+		let mut bad_proof = proof.clone();
+		// change a signature to a different key
+		*bad_proof.signatures.first_mut().unwrap() =
+			Some(Keyring::Dave.sign(&proof.commitment.encode()));
+		match verify_with_validator_set::<Block>(block_num + 1, &validator_set, &bad_proof.into()) {
+			Err(ConsensusError::InvalidJustification) => (),
+			_ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"),
+		};
+	}
+
+	#[test]
+	fn should_decode_and_verify_commitment() {
+		let keys = &[Keyring::Alice, Keyring::Bob];
+		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
+		let block_num = 1;
+
+		// build valid justification
+		let proof = new_signed_commitment(block_num, &validator_set, keys);
+		let versioned_proof: VersionedFinalityProof<NumberFor<Block>, Signature> = proof.into();
+		let encoded = versioned_proof.encode();
+
+		// should successfully decode and verify
+		let verified =
+			decode_and_verify_commitment::<Block>(&encoded, block_num, &validator_set).unwrap();
+		assert_eq!(verified, versioned_proof);
+	}
+}
diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs
index c025ec5686ad2..81c72dec8cd08 100644
--- a/client/beefy/src/lib.rs
+++ b/client/beefy/src/lib.rs
@@ -16,23 +16,18 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use std::sync::Arc;
-
+use beefy_primitives::{BeefyApi, MmrRootHash};
 use prometheus::Registry;
-
 use sc_client_api::{Backend, BlockchainEvents, Finalizer};
+use sc_consensus::BlockImport;
 use sc_network_gossip::Network as GossipNetwork;
-
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
-use sp_consensus::SyncOracle;
+use sp_consensus::{Error as ConsensusError, SyncOracle};
 use sp_keystore::SyncCryptoStorePtr;
 use sp_mmr_primitives::MmrApi;
 use sp_runtime::traits::Block;
-
-use beefy_primitives::{BeefyApi, MmrRootHash};
-
-use crate::notification::{BeefyBestBlockSender, BeefySignedCommitmentSender};
+use std::sync::Arc;
 
 mod error;
 mod gossip;
@@ -41,11 +36,21 @@ mod metrics;
 mod round;
 mod worker;
 
+pub mod import;
+pub mod justification;
 pub mod notification;
 
 #[cfg(test)]
 mod tests;
 
+use crate::{
+	import::BeefyBlockImport,
+	notification::{
+		BeefyBestBlockSender, BeefyBestBlockStream, BeefySignedCommitmentSender,
+		BeefySignedCommitmentStream,
+	},
+};
+
 pub use beefy_protocol_name::standard_name as protocol_standard_name;
 
 pub(crate) mod beefy_protocol_name {
@@ -110,6 +115,68 @@ where
 	// empty
 }
 
+/// Links between the block importer, the background voter and the RPC layer,
+/// to be used by the voter.
+#[derive(Clone)]
+pub struct BeefyVoterLinks<B: Block> {
+	// BlockImport -> Voter links
+	/// Stream of BEEFY signed commitments from block import to voter.
+	pub from_block_import_justif_stream: BeefySignedCommitmentStream<B>,
+
+	// Voter -> RPC links
+	/// Sends BEEFY signed commitments from voter to RPC.
+	pub to_rpc_justif_sender: BeefySignedCommitmentSender<B>,
+	/// Sends BEEFY best block hashes from voter to RPC.
+	pub to_rpc_best_block_sender: BeefyBestBlockSender<B>,
+}
+
+/// Links used by the BEEFY RPC layer, from the BEEFY background voter.
+#[derive(Clone)]
+pub struct BeefyRPCLinks<B: Block> {
+	/// Stream of signed commitments coming from the voter.
+	pub from_voter_justif_stream: BeefySignedCommitmentStream<B>,
+	/// Stream of BEEFY best block hashes coming from the voter.
+	pub from_voter_best_beefy_stream: BeefyBestBlockStream<B>,
+}
+
+/// Make block importer and link half necessary to tie the background voter to it.
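// Intended wiring (sketch assembled from this patch; surrounding values are
// assumed): a node builder replaces its raw import with the wrapped one and
// distributes the links, e.g.
//
//     let (block_import, voter_links, rpc_links) =
//         beefy_block_import_and_links(inner_block_import, backend.clone(), runtime.clone());
//     // `block_import` goes into the import queue,
//     // `voter_links` into `BeefyParams { links, .. }`,
//     // `rpc_links` into the BEEFY RPC handler.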
+pub fn beefy_block_import_and_links<B, BE, RuntimeApi, I>(
+	wrapped_block_import: I,
+	backend: Arc<BE>,
+	runtime: Arc<RuntimeApi>,
+) -> (BeefyBlockImport<B, BE, RuntimeApi, I>, BeefyVoterLinks<B>, BeefyRPCLinks<B>)
+where
+	B: Block,
+	BE: Backend<B>,
+	I: BlockImport<B, Error = ConsensusError, Transaction = sp_api::TransactionFor<RuntimeApi, B>>
+		+ Send
+		+ Sync,
+	RuntimeApi: ProvideRuntimeApi<B> + Send + Sync,
+	RuntimeApi::Api: BeefyApi<B>,
+{
+	// Voter -> RPC links
+	let (to_rpc_justif_sender, from_voter_justif_stream) =
+		notification::BeefySignedCommitmentStream::<B>::channel();
+	let (to_rpc_best_block_sender, from_voter_best_beefy_stream) =
+		notification::BeefyBestBlockStream::<B>::channel();
+
+	// BlockImport -> Voter links
+	let (to_voter_justif_sender, from_block_import_justif_stream) =
+		notification::BeefySignedCommitmentStream::<B>::channel();
+
+	// BlockImport
+	let import =
+		BeefyBlockImport::new(backend, runtime, wrapped_block_import, to_voter_justif_sender);
+	let voter_links = BeefyVoterLinks {
+		from_block_import_justif_stream,
+		to_rpc_justif_sender,
+		to_rpc_best_block_sender,
+	};
+	let rpc_links = BeefyRPCLinks { from_voter_best_beefy_stream, from_voter_justif_stream };
+
+	(import, voter_links, rpc_links)
+}
+
 /// BEEFY gadget initialization parameters.
 pub struct BeefyParams<B, BE, C, N, R>
 where
@@ -130,16 +197,14 @@ where
 	pub key_store: Option<SyncCryptoStorePtr>,
 	/// Gossip network
 	pub network: N,
-	/// BEEFY signed commitment sender
-	pub signed_commitment_sender: BeefySignedCommitmentSender<B>,
-	/// BEEFY best block sender
-	pub beefy_best_block_sender: BeefyBestBlockSender<B>,
 	/// Minimal delta between blocks, BEEFY should vote for
 	pub min_block_delta: u32,
 	/// Prometheus metric registry
 	pub prometheus_registry: Option<Registry>,
 	/// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`].
 	pub protocol_name: std::borrow::Cow<'static, str>,
+	/// Links between the block importer, the background voter and the RPC layer.
+	pub links: BeefyVoterLinks<B>,
 }
 
 /// Start the BEEFY gadget.
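For orientation, with this patch a caller assembles the parameters roughly as follows (a sketch; field values and the gadget entry-point name are assumed, only `links` is new here):

    let beefy_params = beefy_gadget::BeefyParams {
        client,
        backend,
        runtime,
        key_store,
        network,
        min_block_delta: 8,
        prometheus_registry: None,
        protocol_name,
        links: voter_links, // from `beefy_block_import_and_links`
    };
    // then hand `beefy_params` to the gadget start function.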
@@ -160,11 +225,10 @@ where
 		runtime,
 		key_store,
 		network,
-		signed_commitment_sender,
-		beefy_best_block_sender,
 		min_block_delta,
 		prometheus_registry,
 		protocol_name,
+		links,
 	} = beefy_params;
 
 	let sync_oracle = network.clone();
@@ -194,14 +258,13 @@ where
 		client,
 		backend,
 		runtime,
+		sync_oracle,
 		key_store: key_store.into(),
-		signed_commitment_sender,
-		beefy_best_block_sender,
 		gossip_engine,
 		gossip_validator,
-		min_block_delta,
+		links,
 		metrics,
-		sync_oracle,
+		min_block_delta,
 	};
 
 	let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params);
diff --git a/client/beefy/src/metrics.rs b/client/beefy/src/metrics.rs
index a6d29dbf88abb..71e34e24c4fa0 100644
--- a/client/beefy/src/metrics.rs
+++ b/client/beefy/src/metrics.rs
@@ -32,8 +32,8 @@ pub(crate) struct Metrics {
 	pub beefy_best_block: Gauge<U64>,
 	/// Next block BEEFY should vote on
 	pub beefy_should_vote_on: Gauge<U64>,
-	/// Number of sessions without a signed commitment
-	pub beefy_skipped_sessions: Counter<U64>,
+	/// Number of sessions with lagging signed commitment on mandatory block
+	pub beefy_lagging_sessions: Counter<U64>,
 }
 
 impl Metrics {
@@ -65,10 +65,10 @@ impl Metrics {
 				Gauge::new("substrate_beefy_should_vote_on", "Next block, BEEFY should vote on")?,
 				registry,
 			)?,
-			beefy_skipped_sessions: register(
+			beefy_lagging_sessions: register(
 				Counter::new(
-					"substrate_beefy_skipped_sessions",
-					"Number of sessions without a signed commitment",
+					"substrate_beefy_lagging_sessions",
+					"Number of sessions with lagging signed commitment on mandatory block",
 				)?,
 				registry,
 			)?,
diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs
index 7c18d809f6efb..9479891714234 100644
--- a/client/beefy/src/notification.rs
+++ b/client/beefy/src/notification.rs
@@ -17,11 +17,9 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr};
-use sp_runtime::traits::{Block as BlockT, NumberFor};
+use sp_runtime::traits::Block as BlockT;
 
-/// A commitment with matching BEEFY authorities' signatures.
-pub type BeefySignedCommitment<Block> =
-	beefy_primitives::SignedCommitment<NumberFor<Block>, beefy_primitives::crypto::Signature>;
+use crate::justification::BeefySignedCommitment;
 
 /// The sending half of the notifications channel(s) used to send
 /// notifications about best BEEFY block from the gadget side.
diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs
index fecb9557df6ea..ebd85c8dea05d 100644
--- a/client/beefy/src/round.rs
+++ b/client/beefy/src/round.rs
@@ -59,7 +59,8 @@ impl RoundTracker {
 	}
 }
 
-fn threshold(authorities: usize) -> usize {
+/// Minimum size of `authorities` subset that produced valid signatures for a block to finalize.
+pub fn threshold(authorities: usize) -> usize {
 	let faulty = authorities.saturating_sub(1) / 3;
 	authorities - faulty
 }
@@ -70,9 +71,10 @@ fn threshold(authorities: usize) -> usize {
 
 /// Does not do any validation on votes or signatures, layers above need to handle that (gossip).
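// Worked example for `threshold` above: with `n` authorities the tolerated
// number of faulty ones is `faulty = (n - 1) / 3` (integer division) and the
// signature threshold is `n - faulty`, i.e. strictly more than two thirds
// of the validator set must sign:
//
//     n = 3  -> faulty = 0 -> threshold = 3
//     n = 4  -> faulty = 1 -> threshold = 3
//     n = 7  -> faulty = 2 -> threshold = 5
//     n = 10 -> faulty = 3 -> threshold = 7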
 pub(crate) struct Rounds<Payload, B: Block> {
 	rounds: BTreeMap<(Payload, NumberFor<B>), RoundTracker>,
-	best_done: Option<NumberFor<B>>,
 	session_start: NumberFor<B>,
 	validator_set: ValidatorSet<Public>,
+	mandatory_done: bool,
+	best_done: Option<NumberFor<B>>,
 }
 
 impl<P, B> Rounds<P, B>
@@ -81,15 +83,15 @@ where
 	B: Block,
 {
 	pub(crate) fn new(session_start: NumberFor<B>, validator_set: ValidatorSet<Public>) -> Self {
-		Rounds { rounds: BTreeMap::new(), best_done: None, session_start, validator_set }
+		Rounds {
+			rounds: BTreeMap::new(),
+			session_start,
+			validator_set,
+			mandatory_done: false,
+			best_done: None,
+		}
 	}
-}
 
-impl<P, B> Rounds<P, B>
-where
-	P: Ord + Hash + Clone,
-	B: Block,
-{
 	pub(crate) fn validator_set_id(&self) -> ValidatorSetId {
 		self.validator_set.id()
 	}
@@ -98,8 +100,12 @@ where
 		self.validator_set.validators()
 	}
 
-	pub(crate) fn session_start(&self) -> &NumberFor<B> {
-		&self.session_start
+	pub(crate) fn session_start(&self) -> NumberFor<B> {
+		self.session_start
+	}
+
+	pub(crate) fn mandatory_done(&self) -> bool {
+		self.mandatory_done
 	}
 
 	pub(crate) fn should_self_vote(&self, round: &(P, NumberFor<B>)) -> bool {
@@ -113,12 +119,9 @@ where
 		vote: (Public, Signature),
 		self_vote: bool,
 	) -> bool {
-		if Some(round.1.clone()) <= self.best_done {
-			debug!(
-				target: "beefy",
-				"🥩 received vote for old stale round {:?}, ignoring",
-				round.1
-			);
+		let num = round.1;
+		if num < self.session_start || Some(num) <= self.best_done {
+			debug!(target: "beefy", "🥩 received vote for old stale round {:?}, ignoring", num);
 			false
 		} else if !self.validators().iter().any(|id| vote.0 == *id) {
 			debug!(
@@ -147,6 +150,7 @@ where
 		// remove this and older (now stale) rounds
 		let signatures = self.rounds.remove(round)?.votes;
 		self.rounds.retain(|&(_, number), _| number > round.1);
+		self.mandatory_done = self.mandatory_done || round.1 == self.session_start;
 		self.best_done = self.best_done.max(Some(round.1));
 		debug!(target: "beefy", "🥩 Concluded round #{}", round.1);
 
@@ -160,6 +164,11 @@ where
 			None
 		}
 	}
+
+	#[cfg(test)]
+	pub(crate) fn test_set_mandatory_done(&mut self, done: bool) {
+		self.mandatory_done = done;
+	}
 }
 
 #[cfg(test)]
@@ -226,7 +235,7 @@ mod tests {
 		let rounds = Rounds::<H256, Block>::new(session_start, validators);
 
 		assert_eq!(42, rounds.validator_set_id());
-		assert_eq!(1, *rounds.session_start());
+		assert_eq!(1, rounds.session_start());
 		assert_eq!(
 			&vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()],
 			rounds.validators()
@@ -307,6 +316,43 @@ mod tests {
 		));
 	}
 
+	#[test]
+	fn old_rounds_not_accepted() {
+		sp_tracing::try_init_simple();
+
+		let validators = ValidatorSet::<Public>::new(
+			vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()],
+			42,
+		)
+		.unwrap();
+		let alice = (Keyring::Alice.public(), Keyring::Alice.sign(b"I am committed"));
+
+		let session_start = 10u64.into();
+		let mut rounds = Rounds::<H256, Block>::new(session_start, validators);
+
+		let mut vote = (H256::from_low_u64_le(1), 9);
+		// add vote for previous session, should fail
+		assert!(!rounds.add_vote(&vote, alice.clone(), true));
+		// no votes present
+		assert!(rounds.rounds.is_empty());
+
+		// simulate 11 was concluded
+		rounds.best_done = Some(11);
+		// add votes for current session, but already concluded rounds, should fail
+		vote.1 = 10;
+		assert!(!rounds.add_vote(&vote, alice.clone(), true));
+		vote.1 = 11;
+		assert!(!rounds.add_vote(&vote, alice.clone(), true));
+		// no votes present
+		assert!(rounds.rounds.is_empty());
+
+		// add good vote
+		vote.1 = 12;
+		assert!(rounds.add_vote(&vote, alice, true));
+		// good vote present
+		assert_eq!(rounds.rounds.len(), 1);
+	}
+
 	#[test]
 	fn multiple_rounds() {
multiple_rounds() { sp_tracing::try_init_simple(); diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 78e697a6ada81..9c8f443dd1f7e 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,12 +21,15 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{sync::Arc, task::Poll}; +use std::{collections::HashMap, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; -use sc_consensus::BoxJustificationImport; +use sc_consensus::{ + BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, + ImportedAux, +}; use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, @@ -35,7 +38,8 @@ use sc_network_test::{ use sc_utils::notification::NotificationReceiver; use beefy_primitives::{ - crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, BEEFY_ENGINE_ID, + crypto::{AuthorityId, Signature}, + BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; use sp_mmr_primitives::{ @@ -47,19 +51,32 @@ use sp_consensus::BlockOrigin; use sp_core::H256; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - codec::Encode, generic::BlockId, traits::Header as HeaderT, BuildStorage, DigestItem, Storage, + codec::Encode, + generic::BlockId, + traits::{Header as HeaderT, NumberFor}, + BuildStorage, DigestItem, Justifications, Storage, }; use substrate_test_runtime_client::{runtime::Header, ClientExt}; -use crate::{beefy_protocol_name, keystore::tests::Keyring as BeefyKeyring, notification::*}; +use crate::{ + beefy_block_import_and_links, beefy_protocol_name, justification::*, + keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, +}; pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); +type BeefyBlockImport = crate::BeefyBlockImport< + Block, + substrate_test_runtime_client::Backend, + two_validators::TestApi, + BlockImportAdapter>, +>; + pub(crate) type BeefyValidatorSet = ValidatorSet; -pub(crate) type BeefyPeer = Peer; +pub(crate) type BeefyPeer = Peer; #[derive(Debug, Serialize, Deserialize)] struct Genesis(std::collections::BTreeMap); @@ -97,17 +114,10 @@ fn beefy_protocol_name() { assert_eq!(proto_name.to_string(), expected); } -// TODO: compiler warns us about unused `signed_commitment_stream`, will use in later tests -#[allow(dead_code)] -#[derive(Clone)] -pub(crate) struct BeefyLinkHalf { - pub signed_commitment_stream: BeefySignedCommitmentStream, - pub beefy_best_block_stream: BeefyBestBlockStream, -} - #[derive(Default)] pub(crate) struct PeerData { - pub(crate) beefy_link_half: Mutex>, + pub(crate) beefy_rpc_links: Mutex>>, + pub(crate) beefy_voter_links: Mutex>>, } #[derive(Default)] @@ -163,7 +173,7 @@ impl BeefyTestNet { impl TestNetFactory for BeefyTestNet { type Verifier = PassThroughVerifier; - type BlockImport = PeersClient; + type BlockImport = BeefyBlockImport; type PeerData = PeerData; fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { @@ -178,7 +188,17 @@ impl TestNetFactory for BeefyTestNet { Option>, Self::PeerData, ) { - (client.as_block_import(), None, 
PeerData::default()) + let inner = BlockImportAdapter::new(client.clone()); + let (block_import, voter_links, rpc_links) = beefy_block_import_and_links( + inner, + client.as_backend(), + Arc::new(two_validators::TestApi {}), + ); + let peer_data = PeerData { + beefy_rpc_links: Mutex::new(Some(rpc_links)), + beefy_voter_links: Mutex::new(Some(voter_links)), + }; + (BlockImportAdapter::new(block_import), None, peer_data) } fn peer(&mut self, i: usize) -> &mut BeefyPeer { @@ -333,12 +353,12 @@ where let keystore = create_beefy_keystore(*key); - let (signed_commitment_sender, signed_commitment_stream) = - BeefySignedCommitmentStream::::channel(); - let (beefy_best_block_sender, beefy_best_block_stream) = - BeefyBestBlockStream::::channel(); - let beefy_link_half = BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream }; - *peer.data.beefy_link_half.lock() = Some(beefy_link_half); + let (_, _, peer_data) = net.make_block_import(peer.client().clone()); + let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; + + let beefy_voter_links = beefy_voter_links.lock().take(); + *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); + *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); let beefy_params = crate::BeefyParams { client: peer.client().as_client(), @@ -346,8 +366,7 @@ where runtime: api.clone(), key_store: Some(keystore), network: peer.network_service().clone(), - signed_commitment_sender, - beefy_best_block_sender, + links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, protocol_name: BEEFY_PROTOCOL_NAME.into(), @@ -382,11 +401,11 @@ pub(crate) fn get_beefy_streams( let mut best_block_streams = Vec::new(); let mut signed_commitment_streams = Vec::new(); for peer_id in 0..peers.len() { - let beefy_link_half = - net.peer(peer_id).data.beefy_link_half.lock().as_ref().unwrap().clone(); - let BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream } = beefy_link_half; - best_block_streams.push(beefy_best_block_stream.subscribe()); - signed_commitment_streams.push(signed_commitment_stream.subscribe()); + let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); + let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = + beefy_rpc_links; + best_block_streams.push(from_voter_best_beefy_stream.subscribe()); + signed_commitment_streams.push(from_voter_justif_stream.subscribe()); } (best_block_streams, signed_commitment_streams) } @@ -670,3 +689,123 @@ fn correct_beefy_payload() { wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[11]); } + +#[test] +fn beefy_importing_blocks() { + use futures::{executor::block_on, future::poll_fn, task::Poll}; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::BlockBackend; + + sp_tracing::try_init_simple(); + + let mut net = BeefyTestNet::new(2, 0); + + let client = net.peer(0).client().clone(); + let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); + let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; + let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; + + let params = |block: Block, justifications: Option| { + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justifications = justifications; + import.body = Some(block.extrinsics); + import.finalized = true; + import.fork_choice = 
Some(ForkChoiceStrategy::LongestChain);
+ import
+ };
+
+ let full_client = client.as_client();
+ let parent_id = BlockId::Number(0);
+ let block_id = BlockId::Number(1);
+ let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap();
+ let block = builder.build().unwrap().block;
+
+ // Import without justifications.
+ let mut justif_recv = justif_stream.subscribe();
+ assert_eq!(
+ block_on(block_import.import_block(params(block.clone(), None), HashMap::new())).unwrap(),
+ ImportResult::Imported(ImportedAux { is_new_best: true, ..Default::default() }),
+ );
+ assert_eq!(
+ block_on(block_import.import_block(params(block, None), HashMap::new())).unwrap(),
+ ImportResult::AlreadyInChain
+ );
+ // Verify no justifications present:
+ {
+ // none in backend,
+ assert!(full_client.justifications(&block_id).unwrap().is_none());
+ // and none sent to BEEFY worker.
+ block_on(poll_fn(move |cx| {
+ assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending);
+ Poll::Ready(())
+ }));
+ }
+
+ // Import with valid justification.
+ let parent_id = BlockId::Number(1);
+ let block_num = 2;
+ let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob];
+ let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
+ let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys);
+ let versioned_proof: VersionedFinalityProof, Signature> = proof.into();
+ let encoded = versioned_proof.encode();
+ let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded)));
+
+ let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap();
+ let block = builder.build().unwrap().block;
+ let mut justif_recv = justif_stream.subscribe();
+ assert_eq!(
+ block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(),
+ ImportResult::Imported(ImportedAux {
+ bad_justification: false,
+ is_new_best: true,
+ ..Default::default()
+ }),
+ );
+ // Verify justification successfully imported:
+ {
+ // available in backend,
+ assert!(full_client.justifications(&BlockId::Number(block_num)).unwrap().is_some());
+ // and also sent to BEEFY worker.
+ block_on(poll_fn(move |cx| {
+ match justif_recv.poll_next_unpin(cx) {
+ Poll::Ready(Some(_justification)) => (),
+ v => panic!("unexpected value: {:?}", v),
+ }
+ Poll::Ready(())
+ }));
+ }
+
+ // Import with invalid justification (incorrect validator set).
+ let parent_id = BlockId::Number(2);
+ let block_num = 3;
+ let keys = &[BeefyKeyring::Alice];
+ let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap();
+ let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys);
+ let versioned_proof: VersionedFinalityProof, Signature> = proof.into();
+ let encoded = versioned_proof.encode();
+ let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded)));
+
+ let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap();
+ let block = builder.build().unwrap().block;
+ let mut justif_recv = justif_stream.subscribe();
+ assert_eq!(
+ block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(),
+ ImportResult::Imported(ImportedAux {
+ // Still `false` because we don't want to fail import on bad BEEFY justifications.
+ bad_justification: false,
+ is_new_best: true,
+ ..Default::default()
+ }),
+ );
+ // Verify bad justification was not imported:
+ {
+ // none in backend,
+ assert!(full_client.justifications(&BlockId::Number(block_num)).unwrap().is_none());
+ // and none sent to BEEFY worker.
+ block_on(poll_fn(move |cx| {
+ assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending);
+ Poll::Ready(())
+ }));
+ }
+}
diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs
index dccf7ba7504dd..3bff0822ebdb4 100644
--- a/client/beefy/src/worker.rs
+++ b/client/beefy/src/worker.rs
@@ -17,11 +17,10 @@
// along with this program. If not, see .
use std::{
- collections::{BTreeMap, BTreeSet},
+ collections::{BTreeMap, BTreeSet, VecDeque},
fmt::Debug,
marker::PhantomData,
sync::Arc,
- time::Duration,
};
use codec::{Codec, Decode, Encode};
@@ -48,57 +47,174 @@ use beefy_primitives::{
};
use crate::{
- error,
+ error::Error,
gossip::{topic, GossipValidator},
+ justification::BeefySignedCommitment,
keystore::BeefyKeystore,
metric_inc, metric_set,
metrics::Metrics,
- notification::{BeefyBestBlockSender, BeefySignedCommitmentSender},
round::Rounds,
- Client,
+ BeefyVoterLinks, Client,
};
+enum RoundAction {
+ Drop,
+ Process,
+ Enqueue,
+}
+
+/// Responsible for the voting strategy.
+/// It chooses which incoming votes to accept and which votes to generate.
+struct VoterOracle {
+ /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session.
+ ///
+ /// There are three voter states corresponding to three queue states:
+ /// 1. voter uninitialized: queue empty,
+ /// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized:
+ /// queue has ONE element, the 'current session' where `mandatory_done == true`,
+ /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`.
+ /// In this state, every time a session gets its mandatory block BEEFY finalized, it's
+ /// popped off the queue, eventually getting to state `2. up-to-date`.
+ sessions: VecDeque>,
+ /// Min delta in block numbers between two blocks BEEFY should vote on.
+ min_block_delta: u32,
+}
+
+impl VoterOracle {
+ pub fn new(min_block_delta: u32) -> Self {
+ Self {
+ sessions: VecDeque::new(),
+ // Always target at least one block better than current best beefy.
+ min_block_delta: min_block_delta.max(1),
+ }
+ }
+
+ /// Return mutable reference to rounds pertaining to first session in the queue.
+ /// Voting will always happen at the head of the queue.
+ pub fn rounds_mut(&mut self) -> Option<&mut Rounds> {
+ self.sessions.front_mut()
+ }
+
+ /// Add new observed session to the Oracle.
+ pub fn add_session(&mut self, rounds: Rounds) {
+ self.sessions.push_back(rounds);
+ self.try_prune();
+ }
+
+ /// Prune the queue to keep the Oracle in one of the expected three states.
+ ///
+ /// Call this function on each BEEFY finality,
+ /// or at the very least on each BEEFY mandatory block finality.
+ pub fn try_prune(&mut self) {
+ if self.sessions.len() > 1 {
+ // when there are multiple sessions, only keep the `!mandatory_done()` ones.
+ self.sessions.retain(|s| !s.mandatory_done())
+ }
+ }
+
+ /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept.
+ pub fn accepted_interval(
+ &self,
+ best_grandpa: NumberFor,
+ ) -> Result<(NumberFor, NumberFor), Error> {
+ let rounds = self.sessions.front().ok_or(Error::UninitSession)?;
+
+ if rounds.mandatory_done() {
+ // There's only one session active and its mandatory is done.
+ // Accept any GRANDPA finalized vote.
+ Ok((rounds.session_start(), best_grandpa.into()))
+ } else {
+ // There's at least one session with mandatory not done.
+ // Only accept votes for the mandatory block in the front of queue.
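// Illustrative example, not part of the original patch: with sessions
// queued at starts 10 and 20, and session 10's mandatory block not yet
// BEEFY-finalized, this branch keeps the interval pinned at [10, 10] no
// matter how far GRANDPA finality has advanced.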
+ Ok((rounds.session_start(), rounds.session_start())) + } + } + + /// Utility function to quickly decide what to do for each round. + pub fn triage_round( + &self, + round: NumberFor, + best_grandpa: NumberFor, + ) -> Result { + let (start, end) = self.accepted_interval(best_grandpa)?; + if start <= round && round <= end { + Ok(RoundAction::Process) + } else if round > end { + Ok(RoundAction::Enqueue) + } else { + Ok(RoundAction::Drop) + } + } + + /// Return `Some(number)` if we should be voting on block `number`, + /// return `None` if there is no block we should vote on. + pub fn voting_target( + &self, + best_beefy: Option>, + best_grandpa: NumberFor, + ) -> Option> { + let rounds = if let Some(r) = self.sessions.front() { + r + } else { + debug!(target: "beefy", "🥩 No voting round started"); + return None + }; + + // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. + let target = + vote_target(best_grandpa, best_beefy, rounds.session_start(), self.min_block_delta); + trace!( + target: "beefy", + "🥩 best beefy: #{:?}, best finalized: #{:?}, current_vote_target: {:?}", + best_beefy, + best_grandpa, + target + ); + target + } +} + pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, pub runtime: Arc, + pub sync_oracle: SO, pub key_store: BeefyKeystore, - pub signed_commitment_sender: BeefySignedCommitmentSender, - pub beefy_best_block_sender: BeefyBestBlockSender, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, - pub min_block_delta: u32, + pub links: BeefyVoterLinks, pub metrics: Option, - pub sync_oracle: SO, + pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol pub(crate) struct BeefyWorker { + // utilities client: Arc, backend: Arc, runtime: Arc, + sync_oracle: SO, key_store: BeefyKeystore, - signed_commitment_sender: BeefySignedCommitmentSender, gossip_engine: GossipEngine, gossip_validator: Arc>, - /// Min delta in block numbers between two blocks, BEEFY should vote on - min_block_delta: u32, + + // channels + /// Links between the block importer, the background voter and the RPC layer. + links: BeefyVoterLinks, + + // voter state + /// BEEFY client metrics. metrics: Option, - rounds: Option>, - /// Buffer holding votes for blocks that the client hasn't seen finality for. - pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, - /// Best block we received a GRANDPA notification for + /// Best block we received a GRANDPA finality for. best_grandpa_block_header: ::Header, - /// Best block a BEEFY voting round has been concluded for + /// Best block a BEEFY voting round has been concluded for. best_beefy_block: Option>, - /// Used to keep RPC worker up to date on latest/best beefy - beefy_best_block_sender: BeefyBestBlockSender, - /// Validator set id for the last signed commitment - last_signed_id: u64, - /// Handle to the sync oracle - sync_oracle: SO, - // keep rustc happy - _backend: PhantomData, + /// Buffer holding votes for future processing. + pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, + /// Buffer holding justifications for future processing. + pending_justifications: BTreeMap, Vec>>, + /// Chooses which incoming votes to accept and which votes to generate. 
+ voting_oracle: VoterOracle, } impl BeefyWorker @@ -122,13 +238,12 @@ where backend, runtime, key_store, - signed_commitment_sender, - beefy_best_block_sender, + sync_oracle, gossip_engine, gossip_validator, - min_block_delta, + links, metrics, - sync_oracle, + min_block_delta, } = worker_params; let last_finalized_header = client @@ -139,53 +254,29 @@ where client: client.clone(), backend, runtime, + sync_oracle, key_store, - signed_commitment_sender, gossip_engine, gossip_validator, - // always target at least one block better than current best beefy - min_block_delta: min_block_delta.max(1), + links, metrics, - rounds: None, - pending_votes: BTreeMap::new(), best_grandpa_block_header: last_finalized_header, best_beefy_block: None, - last_signed_id: 0, - beefy_best_block_sender, - sync_oracle, - _backend: PhantomData, + pending_votes: BTreeMap::new(), + pending_justifications: BTreeMap::new(), + voting_oracle: VoterOracle::new(min_block_delta), } } - /// Return `Some(number)` if we should be voting on block `number` now, - /// return `None` if there is no block we should vote on now. - fn current_vote_target(&self) -> Option> { - let rounds = if let Some(r) = &self.rounds { - r - } else { - debug!(target: "beefy", "🥩 No voting round started"); - return None - }; - - let best_finalized = *self.best_grandpa_block_header.number(); - // `target` is guaranteed > `best_beefy` since `min_block_delta` is at least `1`. - let target = vote_target( - best_finalized, - self.best_beefy_block, - *rounds.session_start(), - self.min_block_delta, - ); - trace!( - target: "beefy", - "🥩 best beefy: #{:?}, best finalized: #{:?}, current_vote_target: {:?}", - self.best_beefy_block, - best_finalized, - target - ); - if let Some(target) = &target { - metric_set!(self, beefy_should_vote_on, target); - } - target + /// Simple wrapper that gets MMR root from header digests or from client state. + fn get_mmr_root_digest(&self, header: &B::Header) -> Option { + find_mmr_root_digest::(header).or_else(|| { + self.runtime + .runtime_api() + .mmr_root(&BlockId::hash(header.hash())) + .ok() + .and_then(|r| r.ok()) + }) } /// Verify `active` validator set for `block` against the key store @@ -200,7 +291,7 @@ where &self, block: &NumberFor, active: &ValidatorSet, - ) -> Result<(), error::Error> { + ) -> Result<(), Error> { let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); let public_keys = self.key_store.public_keys()?; @@ -209,121 +300,100 @@ where if store.intersection(&active).count() == 0 { let msg = "no authority public key found in store".to_string(); debug!(target: "beefy", "🥩 for block {:?} {}", block, msg); - Err(error::Error::Keystore(msg)) + Err(Error::Keystore(msg)) } else { Ok(()) } } - /// Set best BEEFY block to `block_num`. - /// - /// Also sends/updates the best BEEFY block hash to the RPC worker. - fn set_best_beefy_block(&mut self, block_num: NumberFor) { - if Some(block_num) > self.best_beefy_block { - // Try to get block hash ourselves. - let block_hash = match self.client.hash(block_num) { - Ok(h) => h, - Err(e) => { - error!(target: "beefy", "🥩 Failed to get hash for block number {}: {}", - block_num, e); - None - }, - }; - // Update RPC worker with new best BEEFY block hash. - block_hash.map(|hash| { - self.beefy_best_block_sender - .notify(|| Ok::<_, ()>(hash)) - .expect("forwards closure result; the closure always returns Ok; qed.") - }); - // Set new best BEEFY block number. 
- self.best_beefy_block = Some(block_num); - metric_set!(self, beefy_best_block, block_num); - } else { - debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); - } - } - /// Handle session changes by starting new voting round for mandatory blocks. fn init_session_at( &mut self, - active: ValidatorSet, + validator_set: ValidatorSet, new_session_start: NumberFor, ) { - debug!(target: "beefy", "🥩 New active validator set: {:?}", active); - metric_set!(self, beefy_validator_set_id, active.id()); - // BEEFY should produce a signed commitment for each session - if active.id() != self.last_signed_id + 1 && - active.id() != GENESIS_AUTHORITY_SET_ID && - self.last_signed_id != 0 - { - debug!( - target: "beefy", "🥩 Detected skipped session: active-id {:?}, last-signed-id {:?}", - active.id(), - self.last_signed_id, - ); - metric_inc!(self, beefy_skipped_sessions); + debug!(target: "beefy", "🥩 New active validator set: {:?}", validator_set); + metric_set!(self, beefy_validator_set_id, validator_set.id()); + + // BEEFY should produce the mandatory block of each session. + if let Some(active_session) = self.voting_oracle.rounds_mut() { + if !active_session.mandatory_done() { + debug!( + target: "beefy", "🥩 New session {} while active session {} is still lagging.", + validator_set.id(), + active_session.validator_set_id(), + ); + metric_inc!(self, beefy_lagging_sessions); + } } if log_enabled!(target: "beefy", log::Level::Debug) { // verify the new validator set - only do it if we're also logging the warning - let _ = self.verify_validator_set(&new_session_start, &active); + let _ = self.verify_validator_set(&new_session_start, &validator_set); } - let id = active.id(); - self.rounds = Some(Rounds::new(new_session_start, active)); + let id = validator_set.id(); + self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, new_session_start); } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { debug!(target: "beefy", "🥩 Finality notification: {:?}", notification); - let number = *notification.header.number(); - - // On start-up ignore old finality notifications that we're not interested in. - if number <= *self.best_grandpa_block_header.number() { - debug!(target: "beefy", "🥩 Got unexpected finality for old block #{:?}", number); - return - } + let header = ¬ification.header; - // update best GRANDPA finalized block we have seen - self.best_grandpa_block_header = notification.header.clone(); + if *header.number() > *self.best_grandpa_block_header.number() { + // update best GRANDPA finalized block we have seen + self.best_grandpa_block_header = header.clone(); - self.handle_finality(¬ification.header); - } - - fn handle_finality(&mut self, header: &B::Header) { - // Check for and handle potential new session. - if let Some(new_validator_set) = find_authorities_change::(header) { - self.init_session_at(new_validator_set, *header.number()); + // Check for and enqueue potential new session. + if let Some(new_validator_set) = find_authorities_change::(header) { + self.init_session_at(new_validator_set, *header.number()); + // TODO: when adding SYNC protocol, fire up a request for justification for this + // mandatory block here. + } } + } - // Handle any pending votes for now finalized blocks. - self.check_pending_votes(); - - // Vote if there's now a new vote target. 
- if let Some(target_number) = self.current_vote_target() { - self.do_vote(target_number); - } + /// Based on [VoterOracle] this vote is either processed here or enqueued for later. + fn triage_incoming_vote( + &mut self, + vote: VoteMessage, AuthorityId, Signature>, + ) -> Result<(), Error> { + let block_num = vote.commitment.block_number; + let best_grandpa = *self.best_grandpa_block_header.number(); + match self.voting_oracle.triage_round(block_num, best_grandpa)? { + RoundAction::Process => self.handle_vote( + (vote.commitment.payload, vote.commitment.block_number), + (vote.id, vote.signature), + false, + )?, + RoundAction::Enqueue => { + debug!(target: "beefy", "🥩 Buffer vote for round: {:?}.", block_num); + self.pending_votes.entry(block_num).or_default().push(vote) + }, + RoundAction::Drop => (), + }; + Ok(()) } - // Handles all buffered votes for now finalized blocks. - fn check_pending_votes(&mut self) { - let not_finalized = self.best_grandpa_block_header.number().saturating_add(1u32.into()); - let still_pending = self.pending_votes.split_off(¬_finalized); - let votes_to_handle = std::mem::replace(&mut self.pending_votes, still_pending); - for (num, votes) in votes_to_handle.into_iter() { - if Some(num) > self.best_beefy_block { - debug!(target: "beefy", "🥩 Handling buffered votes for now GRANDPA finalized block: {:?}.", num); - for v in votes.into_iter() { - self.handle_vote( - (v.commitment.payload, v.commitment.block_number), - (v.id, v.signature), - false, - ); - } - } else { - debug!(target: "beefy", "🥩 Dropping outdated buffered votes for now BEEFY finalized block: {:?}.", num); - } - } + /// Based on [VoterOracle] this justification is either processed here or enqueued for later. + /// + /// Expects `justification` to be valid. + fn triage_incoming_justif( + &mut self, + justification: BeefySignedCommitment, + ) -> Result<(), Error> { + let block_num = justification.commitment.block_number; + let best_grandpa = *self.best_grandpa_block_header.number(); + match self.voting_oracle.triage_round(block_num, best_grandpa)? 
{
+ RoundAction::Process => self.finalize(justification),
+ RoundAction::Enqueue => {
+ debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num);
+ self.pending_justifications.entry(block_num).or_default().push(justification)
+ },
+ RoundAction::Drop => (),
+ };
+ Ok(())
}
fn handle_vote(
@@ -331,28 +401,20 @@ where
round: (Payload, NumberFor),
vote: (AuthorityId, Signature),
self_vote: bool,
- ) {
+ ) -> Result<(), Error> {
self.gossip_validator.note_round(round.1);
- let rounds = if let Some(rounds) = self.rounds.as_mut() {
- rounds
- } else {
- debug!(target: "beefy", "🥩 Missing validator set - can't handle vote {:?}", vote);
- return
- };
+ let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?;
if rounds.add_vote(&round, vote, self_vote) {
if let Some(signatures) = rounds.try_conclude(&round) {
self.gossip_validator.conclude_round(round.1);
- // id is stored for skipped session metric calculation
- self.last_signed_id = rounds.validator_set_id();
-
let block_num = round.1;
let commitment = Commitment {
payload: round.0,
block_number: block_num,
- validator_set_id: self.last_signed_id,
+ validator_set_id: rounds.validator_set_id(),
};
let signed_commitment = SignedCommitment { commitment, signatures };
@@ -370,24 +432,115 @@ where
) {
debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, signed_commitment);
}
- self.signed_commitment_sender
- .notify(|| Ok::<_, ()>(signed_commitment))
- .expect("forwards closure result; the closure always returns Ok; qed.");
- self.set_best_beefy_block(block_num);
+ // We created the `signed_commitment` and know it to be valid.
+ self.finalize(signed_commitment);
+ }
+ }
+ Ok(())
+ }
+
+ /// Provide BEEFY finality for a block based on `signed_commitment`:
+ /// 1. Prune irrelevant past sessions from the oracle,
+ /// 2. Set BEEFY best block,
+ /// 3. Send best block hash and `signed_commitment` to RPC worker.
+ ///
+ /// Expects `signed_commitment` to be valid.
+ fn finalize(&mut self, signed_commitment: BeefySignedCommitment) {
+ // Prune any now "finalized" sessions from queue.
+ self.voting_oracle.try_prune();
+
+ let block_num = signed_commitment.commitment.block_number;
+ if Some(block_num) > self.best_beefy_block {
+ // Set new best BEEFY block number.
+ self.best_beefy_block = Some(block_num);
+ metric_set!(self, beefy_best_block, block_num);
+
+ self.client.hash(block_num).ok().flatten().map(|hash| {
+ self.links
+ .to_rpc_best_block_sender
+ .notify(|| Ok::<_, ()>(hash))
+ .expect("forwards closure result; the closure always returns Ok; qed.")
+ });
+
+ self.links
+ .to_rpc_justif_sender
+ .notify(|| Ok::<_, ()>(signed_commitment))
+ .expect("forwards closure result; the closure always returns Ok; qed.");
+ } else {
+ debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num);
+ }
+ }
+
+ /// Handle previously buffered justifications and votes that now land in the voting interval.
+ fn try_pending_justif_and_votes(&mut self) -> Result<(), Error> {
+ let best_grandpa = *self.best_grandpa_block_header.number();
+ let _ph = PhantomData::::default();
+
+ fn to_process_for(
+ pending: &mut BTreeMap, Vec>,
+ (start, end): (NumberFor, NumberFor),
+ _: PhantomData,
+ ) -> BTreeMap, Vec> {
+ // These are still pending.
+ let still_pending = pending.split_off(&end.saturating_add(1u32.into()));
+ // These can be processed.
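// A sketch of the `split_off` bookkeeping, for orientation (not part of
// the original patch): `BTreeMap::split_off(&k)` keeps keys `< k` in
// place and returns the keys `>= k`. So after the call above, `pending`
// holds keys `<= end` and `still_pending` holds keys `> end`; the
// `split_off(&start)` just below then moves `[start, end]` into
// `to_handle`, and the stale keys `< start` are dropped when `pending`
// is overwritten with `still_pending`.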
+ let to_handle = pending.split_off(&start);
+ // The rest can be dropped.
+ *pending = still_pending;
+ // Return the ones to process.
+ to_handle
+ }
+
+ // Process pending justifications.
+ let interval = self.voting_oracle.accepted_interval(best_grandpa)?;
+ if !self.pending_justifications.is_empty() {
+ let justifs_to_handle = to_process_for(&mut self.pending_justifications, interval, _ph);
+ for (num, justifications) in justifs_to_handle.into_iter() {
+ debug!(target: "beefy", "🥩 Handle buffered justifications for: {:?}.", num);
+ for justif in justifications.into_iter() {
+ self.finalize(justif);
+ }
+ }
+ }
+
+ // Process pending votes.
+ let interval = self.voting_oracle.accepted_interval(best_grandpa)?;
+ if !self.pending_votes.is_empty() {
+ let votes_to_handle = to_process_for(&mut self.pending_votes, interval, _ph);
+ for (num, votes) in votes_to_handle.into_iter() {
+ debug!(target: "beefy", "🥩 Handle buffered votes for: {:?}.", num);
+ for v in votes.into_iter() {
+ if let Err(err) = self.handle_vote(
+ (v.commitment.payload, v.commitment.block_number),
+ (v.id, v.signature),
+ false,
+ ) {
+ error!(target: "beefy", "🥩 Error handling buffered vote: {}", err);
+ };
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Decide whether we should vote, and if so, vote on the current target.
+ fn try_to_vote(&mut self) -> Result<(), Error> {
+ // Vote if there's now a new vote target.
+ if let Some(target) = self
+ .voting_oracle
+ .voting_target(self.best_beefy_block, *self.best_grandpa_block_header.number())
+ {
+ metric_set!(self, beefy_should_vote_on, target);
+ self.do_vote(target)?;
+ }
+ Ok(())
+ }
/// Create and gossip Signed Commitment for block number `target_number`.
///
/// Also handle this self vote by calling `self.handle_vote()` for it.
- fn do_vote(&mut self, target_number: NumberFor) {
+ fn do_vote(&mut self, target_number: NumberFor) -> Result<(), Error> {
debug!(target: "beefy", "🥩 Try voting on {}", target_number);
// Most of the time we get here, `target` is actually `best_grandpa`,
@@ -395,18 +548,13 @@ where
let target_header = if target_number == *self.best_grandpa_block_header.number() {
self.best_grandpa_block_header.clone()
} else {
- match self.client.expect_header(BlockId::Number(target_number)) {
- Ok(h) => h,
- Err(err) => {
- debug!(
- target: "beefy",
- "🥩 Could not get header for block #{:?} (error: {:?}), skipping vote..",
- target_number,
- err
- );
- return
- },
- }
+ self.client.expect_header(BlockId::Number(target_number)).map_err(|err| {
+ let err_msg = format!(
+ "Couldn't get header for block #{:?} (error: {:?}), skipping vote..",
+ target_number, err
+ );
+ Error::Backend(err_msg)
+ })?
}; let target_hash = target_header.hash(); @@ -414,26 +562,23 @@ where hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); - return + return Ok(()) }; let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, mmr_root.encode()); - let (validators, validator_set_id) = if let Some(rounds) = &self.rounds { - if !rounds.should_self_vote(&(payload.clone(), target_number)) { - debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); - return - } - (rounds.validators(), rounds.validator_set_id()) - } else { - debug!(target: "beefy", "🥩 Missing validator set - can't vote for: {:?}", target_hash); - return - }; + let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; + if !rounds.should_self_vote(&(payload.clone(), target_number)) { + debug!(target: "beefy", "🥩 Don't double vote for block number: {:?}", target_number); + return Ok(()) + } + let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); + let authority_id = if let Some(id) = self.key_store.authority_id(validators) { debug!(target: "beefy", "🥩 Local authority id: {:?}", id); id } else { debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", target_hash); - return + return Ok(()) }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; @@ -443,7 +588,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); - return + return Ok(()) }, }; @@ -462,13 +607,17 @@ where debug!(target: "beefy", "🥩 Sent vote message: {:?}", message); - self.handle_vote( + if let Err(err) = self.handle_vote( (message.commitment.payload, message.commitment.block_number), (message.id, message.signature), true, - ); + ) { + error!(target: "beefy", "🥩 Error handling self vote: {}", err); + } self.gossip_engine.gossip_message(topic::(), encoded_message, false); + + Ok(()) } /// Wait for BEEFY runtime pallet to be available. @@ -494,6 +643,9 @@ where // Once we'll implement 'initial sync' (catch-up), the worker will be able to // start voting right away. self.handle_finality_notification(¬if); + if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); + } break } else { trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); @@ -529,15 +681,14 @@ where }) .fuse(), ); + let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); loop { - while self.sync_oracle.is_major_syncing() { - debug!(target: "beefy", "Waiting for major sync to complete..."); - futures_timer::Delay::new(Duration::from_secs(5)).await; - } - let mut gossip_engine = &mut self.gossip_engine; - futures::select! { + // Wait for, and handle external events. + // The branches below only change 'state', actual voting happen afterwards, + // based on the new resulting 'state'. + futures::select_biased! { notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); @@ -545,24 +696,24 @@ where return; } }, + // TODO: when adding SYNC protocol, join the on-demand justifications stream to + // this one, and handle them both here. + justif = block_import_justif.next() => { + if let Some(justif) = justif { + // Block import justifications have already been verified to be valid + // by `BeefyBlockImport`. 
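// A general `futures` property worth noting for this event loop (an
// aside, not part of the original patch): `select_biased!` polls its
// branches in source order, so each iteration drains finality
// notifications before block-import justifications, and those before
// gossip votes.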
+ if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: "beefy", "🥩 {}", err); + } + } else { + return; + } + }, vote = votes.next() => { if let Some(vote) = vote { - let block_num = vote.commitment.block_number; - if block_num > *self.best_grandpa_block_header.number() { - // Only handle votes for blocks we _know_ have been finalized. - // Buffer vote to be handled later. - debug!( - target: "beefy", - "🥩 Buffering vote for not (yet) finalized block: {:?}.", - block_num - ); - self.pending_votes.entry(block_num).or_default().push(vote); - } else { - self.handle_vote( - (vote.commitment.payload, vote.commitment.block_number), - (vote.id, vote.signature), - false - ); + // Votes have already been verified to be valid by the gossip validator. + if let Err(err) = self.triage_incoming_vote(vote) { + debug!(target: "beefy", "🥩 {}", err); } } else { return; @@ -573,18 +724,20 @@ where return; } } - } - } - /// Simple wrapper that gets MMR root from header digests or from client state. - fn get_mmr_root_digest(&self, header: &B::Header) -> Option { - find_mmr_root_digest::(header).or_else(|| { - self.runtime - .runtime_api() - .mmr_root(&BlockId::hash(header.hash())) - .ok() - .and_then(|r| r.ok()) - }) + // Don't bother acting on 'state' changes during major sync. + if !self.sync_oracle.is_major_syncing() { + // Handle pending justifications and/or votes for now GRANDPA finalized blocks. + if let Err(err) = self.try_pending_justif_and_votes() { + debug!(target: "beefy", "🥩 {}", err); + } + + // There were external events, 'state' is changed, author a vote if needed/possible. + if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); + } + } + } } } @@ -684,11 +837,11 @@ pub(crate) mod tests { create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, }, + BeefyRPCLinks, }; use futures::{executor::block_on, future::poll_fn, task::Poll}; - use crate::tests::BeefyLinkHalf; use sc_client_api::HeaderBackend; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; @@ -705,12 +858,21 @@ pub(crate) mod tests { ) -> BeefyWorker>> { let keystore = create_beefy_keystore(*key); - let (signed_commitment_sender, signed_commitment_stream) = + let (to_rpc_justif_sender, from_voter_justif_stream) = BeefySignedCommitmentStream::::channel(); - let (beefy_best_block_sender, beefy_best_block_stream) = + let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); - let beefy_link_half = BeefyLinkHalf { signed_commitment_stream, beefy_best_block_stream }; - *peer.data.beefy_link_half.lock() = Some(beefy_link_half); + let (_, from_block_import_justif_stream) = BeefySignedCommitmentStream::::channel(); + + let beefy_rpc_links = + BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; + *peer.data.beefy_rpc_links.lock() = Some(beefy_rpc_links); + + let links = BeefyVoterLinks { + from_block_import_justif_stream, + to_rpc_justif_sender, + to_rpc_best_block_sender, + }; let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); @@ -723,8 +885,7 @@ pub(crate) mod tests { backend: peer.client().as_backend(), runtime: api, key_store: Some(keystore).into(), - signed_commitment_sender, - beefy_best_block_sender, + links, gossip_engine, gossip_validator, min_block_delta, @@ -826,6 +987,107 @@ pub(crate) mod tests { assert_eq!(Some(1072), t); } + #[test] + fn should_vote_target() { + let mut oracle = 
VoterOracle::::new(1);
+
+ // rounds not initialized -> should vote: `None`
+ assert_eq!(oracle.voting_target(None, 1), None);
+
+ let keys = &[Keyring::Alice];
+ let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
+
+ oracle.add_session(Rounds::new(1, validator_set.clone()));
+
+ // under min delta
+ oracle.min_block_delta = 4;
+ assert_eq!(oracle.voting_target(Some(1), 1), None);
+ assert_eq!(oracle.voting_target(Some(2), 5), None);
+
+ // vote on min delta
+ assert_eq!(oracle.voting_target(Some(4), 9), Some(8));
+ oracle.min_block_delta = 8;
+ assert_eq!(oracle.voting_target(Some(10), 18), Some(18));
+
+ // vote on power of two
+ oracle.min_block_delta = 1;
+ assert_eq!(oracle.voting_target(Some(1000), 1008), Some(1004));
+ assert_eq!(oracle.voting_target(Some(1000), 1016), Some(1008));
+
+ // nothing new to vote on
+ assert_eq!(oracle.voting_target(Some(1000), 1000), None);
+
+ // vote on mandatory
+ oracle.sessions.clear();
+ oracle.add_session(Rounds::new(1000, validator_set.clone()));
+ assert_eq!(oracle.voting_target(None, 1008), Some(1000));
+ oracle.sessions.clear();
+ oracle.add_session(Rounds::new(1001, validator_set.clone()));
+ assert_eq!(oracle.voting_target(Some(1000), 1008), Some(1001));
+ }
+
+ #[test]
+ fn test_oracle_accepted_interval() {
+ let keys = &[Keyring::Alice];
+ let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
+
+ let mut oracle = VoterOracle::::new(1);
+
+ // rounds not initialized -> `accepted_interval` errors
+ assert!(oracle.accepted_interval(1).is_err());
+
+ let session_one = 1;
+ oracle.add_session(Rounds::new(session_one, validator_set.clone()));
+ // mandatory not done, only accept mandatory
+ for i in 0..15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one)));
+ }
+
+ // add more sessions, nothing changes
+ let session_two = 11;
+ let session_three = 21;
+ oracle.add_session(Rounds::new(session_two, validator_set.clone()));
+ oracle.add_session(Rounds::new(session_three, validator_set.clone()));
+ // mandatory not done, should accept mandatory for session_one
+ for i in session_three..session_three + 15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_one, session_one)));
+ }
+
+ // simulate finish mandatory for session one, prune oracle
+ oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true);
+ oracle.try_prune();
+ // session_one pruned, should accept mandatory for session_two
+ for i in session_three..session_three + 15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_two, session_two)));
+ }
+
+ // simulate finish mandatory for session two, prune oracle
+ oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true);
+ oracle.try_prune();
+ // session_two pruned, should accept mandatory for session_three
+ for i in session_three..session_three + 15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_three, session_three)));
+ }
+
+ // simulate finish mandatory for session three
+ oracle.sessions.front_mut().unwrap().test_set_mandatory_done(true);
+ // verify all other blocks in this session are now open to voting
+ for i in session_three..session_three + 15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_three, i)));
+ }
+ // pruning does nothing in this case
+ oracle.try_prune();
+ for i in session_three..session_three + 15 {
+ assert_eq!(oracle.accepted_interval(i), Ok((session_three, i)));
+ }
+
+ // adding new session automatically prunes "finalized" previous session
+ let session_four = 31;
oracle.add_session(Rounds::new(session_four, validator_set.clone())); + assert_eq!(oracle.sessions.front().unwrap().session_start(), session_four); + assert_eq!(oracle.accepted_interval(session_four + 10), Ok((session_four, session_four))); + } + #[test] fn extract_authorities_change_digest() { let mut header = Header::new( @@ -876,69 +1138,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(mmr_root_hash)); } - #[test] - fn should_vote_target() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - - // rounds not initialized -> should vote: `None` - assert_eq!(worker.current_vote_target(), None); - - let set_up = |worker: &mut BeefyWorker< - Block, - Backend, - PeersFullClient, - TestApi, - Arc>, - >, - best_grandpa: u64, - best_beefy: Option, - session_start: u64, - min_delta: u32| { - let grandpa_header = Header::new( - best_grandpa, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - worker.best_grandpa_block_header = grandpa_header; - worker.best_beefy_block = best_beefy; - worker.min_block_delta = min_delta; - worker.rounds = Some(Rounds::new(session_start, validator_set.clone())); - }; - - // under min delta - set_up(&mut worker, 1, Some(1), 1, 4); - assert_eq!(worker.current_vote_target(), None); - set_up(&mut worker, 5, Some(2), 1, 4); - assert_eq!(worker.current_vote_target(), None); - - // vote on min delta - set_up(&mut worker, 9, Some(4), 1, 4); - assert_eq!(worker.current_vote_target(), Some(8)); - set_up(&mut worker, 18, Some(10), 1, 8); - assert_eq!(worker.current_vote_target(), Some(18)); - - // vote on power of two - set_up(&mut worker, 1008, Some(1000), 1, 1); - assert_eq!(worker.current_vote_target(), Some(1004)); - set_up(&mut worker, 1016, Some(1000), 1, 2); - assert_eq!(worker.current_vote_target(), Some(1008)); - - // nothing new to vote on - set_up(&mut worker, 1000, Some(1000), 1, 1); - assert_eq!(worker.current_vote_target(), None); - - // vote on mandatory - set_up(&mut worker, 1008, None, 1000, 8); - assert_eq!(worker.current_vote_target(), Some(1000)); - set_up(&mut worker, 1008, Some(1000), 1001, 8); - assert_eq!(worker.current_vote_target(), Some(1001)); - } - #[test] fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; @@ -953,39 +1152,57 @@ pub(crate) mod tests { let keys = &[Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let err_msg = "no authority public key found in store".to_string(); - let expected = Err(error::Error::Keystore(err_msg)); + let expected = Err(Error::Keystore(err_msg)); assert_eq!(worker.verify_validator_set(&1, &validator_set), expected); // worker has no keystore worker.key_store = None.into(); - let expected_err = Err(error::Error::Keystore("no Keystore".into())); + let expected_err = Err(Error::Keystore("no Keystore".into())); assert_eq!(worker.verify_validator_set(&1, &validator_set), expected_err); } #[test] - fn setting_best_beefy_block() { + fn test_finalize() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); let mut best_block_stream = 
best_block_streams.drain(..).next().unwrap(); + let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); - // no 'best beefy block' + let create_signed_commitment = |block_num: NumberFor| { + let commitment = Commitment { + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + block_number: block_num, + validator_set_id: validator_set.id(), + }; + SignedCommitment { commitment, signatures: vec![None] } + }; + + // no 'best beefy block' or signed commitments assert_eq!(worker.best_beefy_block, None); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + assert_eq!(signed_commitments.poll_next_unpin(cx), Poll::Pending); Poll::Ready(()) })); // unknown hash for block #1 - let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - worker.set_best_beefy_block(1); + let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); + let justif = create_signed_commitment(1); + worker.finalize(justif.clone()); assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + match signed_commitments.poll_next_unpin(cx) { + // expect justification + Poll::Ready(Some(received)) => assert_eq!(received, justif), + v => panic!("unexpected value: {:?}", v), + } Poll::Ready(()) })); @@ -994,7 +1211,8 @@ pub(crate) mod tests { let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); net.generate_blocks(2, 10, &validator_set, false); - worker.set_best_beefy_block(2); + let justif = create_signed_commitment(2); + worker.finalize(justif); assert_eq!(worker.best_beefy_block, Some(2)); block_on(poll_fn(move |cx| { match best_block_stream.poll_next_unpin(cx) { @@ -1010,20 +1228,17 @@ pub(crate) mod tests { } #[test] - fn setting_initial_session() { + fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - assert!(worker.rounds.is_none()); + assert!(worker.voting_oracle.sessions.is_empty()); - // verify setting the correct validator sets and boundary for genesis session worker.init_session_at(validator_set.clone(), 1); - - let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.session_start(), &1); - // in genesis case both current and prev validator sets are the same + let worker_rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(worker_rounds.session_start(), 1); assert_eq!(worker_rounds.validators(), validator_set.validators()); assert_eq!(worker_rounds.validator_set_id(), validator_set.id()); @@ -1031,12 +1246,79 @@ pub(crate) mod tests { let keys = &[Keyring::Bob]; let new_validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - // verify setting the correct validator sets and boundary for non-genesis session worker.init_session_at(new_validator_set.clone(), 11); + // Since mandatory is not done for old rounds, we still get those. + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + // Let's finalize mandatory. + rounds.test_set_mandatory_done(true); + worker.voting_oracle.try_prune(); + // Now we should get the next round. 
+ let rounds = worker.voting_oracle.rounds_mut().unwrap(); + // Expect new values. + assert_eq!(rounds.session_start(), 11); + assert_eq!(rounds.validators(), new_validator_set.validators()); + assert_eq!(rounds.validator_set_id(), new_validator_set.id()); + } + + #[test] + fn should_triage_votes_and_process_later() { + let keys = &[Keyring::Alice, Keyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1, 0); + let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + + fn new_vote( + block_number: NumberFor, + ) -> VoteMessage, AuthorityId, Signature> { + let commitment = Commitment { + payload: Payload::new(*b"BF", vec![]), + block_number, + validator_set_id: 0, + }; + VoteMessage { + commitment, + id: Keyring::Alice.public(), + signature: Keyring::Alice.sign(b"I am committed"), + } + } + + // best grandpa is 20 + let best_grandpa_header = Header::new( + 20u32.into(), + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); - let worker_rounds = worker.rounds.as_ref().unwrap(); - assert_eq!(worker_rounds.session_start(), &11); - assert_eq!(worker_rounds.validators(), new_validator_set.validators()); - assert_eq!(worker_rounds.validator_set_id(), new_validator_set.id()); + worker.voting_oracle.add_session(Rounds::new(10, validator_set.clone())); + worker.best_grandpa_block_header = best_grandpa_header; + + // triage votes for blocks 10..13 + worker.triage_incoming_vote(new_vote(10)).unwrap(); + worker.triage_incoming_vote(new_vote(11)).unwrap(); + worker.triage_incoming_vote(new_vote(12)).unwrap(); + // triage votes for blocks 20..23 + worker.triage_incoming_vote(new_vote(20)).unwrap(); + worker.triage_incoming_vote(new_vote(21)).unwrap(); + worker.triage_incoming_vote(new_vote(22)).unwrap(); + + // vote for 10 should have been handled, while the rest buffered for later processing + let mut votes = worker.pending_votes.values(); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 11); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 12); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 20); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); + assert!(votes.next().is_none()); + + // simulate mandatory done, and retry buffered votes + worker.voting_oracle.rounds_mut().unwrap().test_set_mandatory_done(true); + worker.try_pending_justif_and_votes().unwrap(); + // all blocks <= grandpa finalized should have been handled, rest still buffered + let mut votes = worker.pending_votes.values(); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); + assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index d7c83810d521d..c0e1e9f0e944f 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -25,6 +25,7 @@ mod sync; use std::{ borrow::Cow, collections::HashMap, + marker::PhantomData, pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, @@ -567,25 +568,27 @@ impl BlockImportAdapterFull for T where /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. 
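// Aside (a sketch, not part of the original patch): the change below adds
// a `Transaction` type parameter that is never stored, so a
// `PhantomData<Transaction>` marker field is what keeps the struct
// definition well-formed. The same pattern in miniature:
//
//     struct Adapter<I, T> {
//         inner: I,
//         _phantom: std::marker::PhantomData<T>,
//     }
//
// This lets the `BlockImport` impl expose `type Transaction = T` instead
// of hard-coding `()`, which the new `BeefyBlockImport` wrapper needs.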
#[derive(Clone)] -pub struct BlockImportAdapter { +pub struct BlockImportAdapter { inner: I, + _phantom: PhantomData, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. pub fn new(inner: I) -> Self { - Self { inner } + Self { inner, _phantom: PhantomData } } } #[async_trait::async_trait] -impl BlockImport for BlockImportAdapter +impl BlockImport for BlockImportAdapter where I: BlockImport + Send + Sync, I::Transaction: Send, + Transaction: Send + 'static, { type Error = ConsensusError; - type Transaction = (); + type Transaction = Transaction; async fn check_block( &mut self, @@ -596,7 +599,7 @@ where async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, cache: HashMap>, ) -> Result { self.inner.import_block(block.clear_storage_changes_and_mutate(), cache).await diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index ed392139de13f..ddf58474e77a0 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -293,6 +293,12 @@ pub enum VersionedFinalityProof { V1(SignedCommitment), } +impl From> for VersionedFinalityProof { + fn from(commitment: SignedCommitment) -> Self { + VersionedFinalityProof::V1(commitment) + } +} + #[cfg(test)] mod tests { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index b41c605d8a3be..bf77c08b76906 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -151,6 +151,11 @@ impl Justifications { self.iter().find(|j| j.0 == engine_id).map(|j| &j.1) } + /// Remove the encoded justification for the given consensus engine, if it exists. + pub fn remove(&mut self, engine_id: ConsensusEngineId) { + self.0.retain(|j| j.0 != engine_id) + } + /// Return a copy of the encoded justification for the given consensus /// engine, if it exists. pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option { From 3e0554a94193c1221c6bf12c7efabc25e2d63404 Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Sat, 30 Jul 2022 01:59:03 +0800 Subject: [PATCH 085/135] Add Event to Pallet Asset-Tx-Payment (#11690) * Add Event to Pallet Asset-Tx-Payment * add asset_id into the Event Co-authored-by: parity-processbot <> --- bin/node/runtime/src/lib.rs | 1 + .../asset-tx-payment/src/lib.rs | 27 +++++++++++++++++-- .../asset-tx-payment/src/tests.rs | 5 ++-- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 037211155cf0e..2ac1e6444f119 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -456,6 +456,7 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_asset_tx_payment::Config for Runtime { + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< pallet_assets::BalanceToAssetBalance, diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index 83801c44d3578..08561375247ae 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -113,6 +113,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_transaction_payment::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; /// The fungibles instance used to pay for transactions in assets. 
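// For orientation (a sketch based on the bin/node/runtime hunk above, not
// new API): a runtime satisfies the new `Event` bound with
//
//     impl pallet_asset_tx_payment::Config for Runtime {
//         type Event = Event; // the overarching runtime event enum
//         // ...remaining associated types as before...
//     }
//
// after which the pallet can emit `Event::AssetTxFeePaid` through the
// `deposit_event` helper generated further down in this hunk.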
type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. @@ -122,6 +124,19 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee `actual_fee`, of which `tip` was added to the minimum inclusion fee, + /// has been paid by `who` in an asset `asset_id`. + AssetTxFeePaid { + who: T::AccountId, + actual_fee: BalanceOf, + tip: BalanceOf, + asset_id: Option>, + }, + } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority @@ -213,6 +228,8 @@ where Self::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, + // asset_id for the transaction payment + Option>, ); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { @@ -240,7 +257,7 @@ where len: usize, ) -> Result { let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment)) + Ok((self.tip, who.clone(), initial_payment, self.asset_id)) } fn post_dispatch( @@ -250,7 +267,7 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment)) = pre { + if let Some((tip, who, initial_payment, asset_id)) = pre { match initial_payment { InitialPayment::Native(already_withdrawn) => { pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( @@ -273,6 +290,12 @@ where tip.into(), already_withdrawn.into(), )?; + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee, + tip, + asset_id, + }); }, InitialPayment::Nothing => { // `actual_fee` should be zero here for any signed extrinsic. 
It would be diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index ad5bc3f22e57f..08b17a6bf459c 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, Assets: pallet_assets::{Pallet, Call, Storage, Event}, Authorship: pallet_authorship::{Pallet, Call, Storage}, - AssetTxPayment: pallet_asset_tx_payment::{Pallet}, + AssetTxPayment: pallet_asset_tx_payment::{Pallet, Event}, } ); @@ -198,6 +198,7 @@ impl HandleCredit for CreditToBlockAuthor { } impl Config for Runtime { + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -663,7 +664,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment) = ⪯ + let (_tip, _who, initial_payment, _asset_id) = ⪯ let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, From 8f4d1b31163aa53f72d0fcb438ebe7d417093fae Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 29 Jul 2022 23:17:49 +0300 Subject: [PATCH 086/135] Network sync refactoring (part 5) (#11825) * Make `chain_sync` an explicit networking parameter instead of offering factory method * Derive `Copy` on `SyncMode` and remove cloning --- client/network/src/config.rs | 16 ++++------- client/network/src/service.rs | 26 ++++------------- client/network/src/service/tests.rs | 35 ++++++++++++----------- client/network/test/src/lib.rs | 32 ++++++++++++--------- client/service/src/builder.rs | 44 +++++++++++++++-------------- 5 files changed, 69 insertions(+), 84 deletions(-) diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 58349fe973330..2622762da5fc9 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -96,14 +96,8 @@ where /// valid. pub import_queue: Box>, - /// Factory function that creates a new instance of chain sync. - pub create_chain_sync: Box< - dyn FnOnce( - sc_network_common::sync::SyncMode, - Arc, - Option>>, - ) -> crate::error::Result>>, - >, + /// Instance of chain sync implementation. + pub chain_sync: Box>, /// Registry for recording prometheus metrics to. pub metrics_registry: Option, @@ -138,8 +132,8 @@ where /// both outgoing and incoming requests. pub state_request_protocol_config: RequestResponseConfig, - /// Optional warp sync protocol support. Include protocol config and sync provider. - pub warp_sync: Option<(Arc>, RequestResponseConfig)>, + /// Optional warp sync protocol config. + pub warp_sync_protocol_config: Option, } /// Role of the local node. @@ -352,7 +346,7 @@ impl From for ParseErr { } /// Sync operation mode. -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { /// Full block download and verification. 
Full, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index aeb5e25497bb3..409ed88c75c00 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,7 +30,7 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, bitswap::Bitswap, - config::{self, parse_str_addr, Params, TransportConfig}, + config::{parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, network_state::{ @@ -60,7 +60,7 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_common::sync::{SyncMode, SyncState, SyncStatus}; +use sc_network_common::sync::{SyncState, SyncStatus}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -239,21 +239,6 @@ where let default_notif_handshake_message = Roles::from(¶ms.role).encode(); - let (warp_sync_provider, warp_sync_protocol_config) = match params.warp_sync { - Some((p, c)) => (Some(p), Some(c)), - None => (None, None), - }; - - let chain_sync = (params.create_chain_sync)( - match params.network_config.sync_mode { - config::SyncMode::Full => SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - SyncMode::LightState { skip_proofs, storage_chain_mode }, - config::SyncMode::Warp => SyncMode::Warp, - }, - params.chain.clone(), - warp_sync_provider, - )?; let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), params.chain.clone(), @@ -266,7 +251,7 @@ where ) .collect(), params.metrics_registry.as_ref(), - chain_sync, + params.chain_sync, )?; // List of multiaddresses that we know in the network. @@ -303,7 +288,6 @@ where let is_major_syncing = Arc::new(AtomicBool::new(false)); // Build the swarm. - let client = params.chain.clone(); let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = format!( "{} ({})", @@ -389,7 +373,7 @@ where }; let behaviour = { - let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(client)); + let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(params.chain)); let result = Behaviour::new( protocol, user_agent, @@ -397,7 +381,7 @@ where discovery_config, params.block_request_protocol_config, params.state_request_protocol_config, - warp_sync_protocol_config, + params.warp_sync_protocol_config, bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 181d58130aa6b..de474ee8fe4d0 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -42,7 +42,7 @@ type TestNetworkService = NetworkService< /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. 
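// Condensed sketch of the wiring this refactoring applies at every call
// site below (the service tests, the network test harness, the service
// builder): the caller now constructs `ChainSync` up front and hands the
// boxed instance to `Params`, instead of passing a `create_chain_sync`
// factory, and the warp sync provider/config pair is split apart. Names
// are the ones used in this patch; `sync_mode`, `client`, `network_config`
// and `warp_sync_provider` are assumed in scope and errors are propagated:
let chain_sync = ChainSync::new(
    sync_mode,                               // sc_network_common::sync::SyncMode, now `Copy`
    client.clone(),
    Box::new(DefaultBlockAnnounceValidator),
    network_config.max_parallel_downloads,
    warp_sync_provider,                      // Option<Arc<dyn WarpSyncProvider<B>>>
)?;
// ...and later, inside sc_network::config::Params { .. }:
//     chain_sync: Box::new(chain_sync),     // was: create_chain_sync: Box::new(|..| ..)
//     warp_sync_protocol_config,            // was: warp_sync: Some((provider, config))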
fn build_test_full_node( - config: config::NetworkConfiguration, + network_config: config::NetworkConfiguration, ) -> (Arc, impl Stream) { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); @@ -111,35 +111,36 @@ fn build_test_full_node( protocol_config }; - let max_parallel_downloads = config.max_parallel_downloads; + let chain_sync = ChainSync::new( + match network_config.sync_mode { + config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + network_config.max_parallel_downloads, + None, + ) + .unwrap(); let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), - network_config: config, + network_config, chain: client.clone(), - transaction_pool: Arc::new(crate::config::EmptyTransactionPool), + transaction_pool: Arc::new(config::EmptyTransactionPool), protocol_id, import_queue, - create_chain_sync: Box::new( - move |sync_mode, chain, warp_sync_provider| match ChainSync::new( - sync_mode, - chain, - Box::new(DefaultBlockAnnounceValidator), - max_parallel_downloads, - warp_sync_provider, - ) { - Ok(chain_sync) => Ok(Box::new(chain_sync)), - Err(error) => Err(Box::new(error).into()), - }, - ), + chain_sync: Box::new(chain_sync), metrics_registry: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - warp_sync: None, + warp_sync_protocol_config: None, }) .unwrap(); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index c0e1e9f0e944f..4659684987f77 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -838,10 +838,25 @@ where protocol_config }; - let max_parallel_downloads = network_config.max_parallel_downloads; let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); + let chain_sync = ChainSync::new( + match network_config.sync_mode { + SyncMode::Full => sc_network_common::sync::SyncMode::Full, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { + skip_proofs, + storage_chain_mode, + }, + SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + block_announce_validator, + network_config.max_parallel_downloads, + Some(warp_sync), + ) + .unwrap(); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -853,23 +868,12 @@ where transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, import_queue, - create_chain_sync: Box::new(move |sync_mode, chain, warp_sync_provider| { - match ChainSync::new( - sync_mode, - chain, - block_announce_validator, - max_parallel_downloads, - warp_sync_provider, - ) { - Ok(chain_sync) => Ok(Box::new(chain_sync)), - Err(error) => Err(Box::new(error).into()), - } - }), + chain_sync: Box::new(chain_sync), metrics_registry: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - warp_sync: Some((warp_sync, warp_protocol_config)), + warp_sync_protocol_config: Some(warp_protocol_config), }) .unwrap(); diff --git 
a/client/service/src/builder.rs b/client/service/src/builder.rs index 0a18943f45006..ec537a33b72d5 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -760,13 +760,15 @@ where protocol_config }; - let warp_sync_params = warp_sync.map(|provider| { - // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); - spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); - (provider, protocol_config) - }); + let (warp_sync_provider, warp_sync_protocol_config) = warp_sync + .map(|provider| { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = + WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); + (Some(provider), Some(protocol_config)) + }) + .unwrap_or_default(); let light_client_request_protocol_config = { // Allow both outgoing and incoming requests. @@ -776,7 +778,18 @@ where protocol_config }; - let max_parallel_downloads = config.network.max_parallel_downloads; + let chain_sync = ChainSync::new( + match config.network.sync_mode { + SyncMode::Full => sc_network_common::sync::SyncMode::Full, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + block_announce_validator, + config.network.max_parallel_downloads, + warp_sync_provider, + )?; let network_params = sc_network::config::Params { role: config.role.clone(), executor: { @@ -796,22 +809,11 @@ where transaction_pool: transaction_pool_adapter as _, protocol_id, import_queue: Box::new(import_queue), - create_chain_sync: Box::new( - move |sync_mode, chain, warp_sync_provider| match ChainSync::new( - sync_mode, - chain, - block_announce_validator, - max_parallel_downloads, - warp_sync_provider, - ) { - Ok(chain_sync) => Ok(Box::new(chain_sync)), - Err(error) => Err(Box::new(error).into()), - }, - ), + chain_sync: Box::new(chain_sync), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, - warp_sync: warp_sync_params, + warp_sync_protocol_config, light_client_request_protocol_config, }; From 986fab5ab303e566f637bb57e3c9c9b7df1d369e Mon Sep 17 00:00:00 2001 From: yjh Date: Sat, 30 Jul 2022 06:07:34 +0800 Subject: [PATCH 087/135] feat: generalize ConsensusDataProvider for manual-seal (#11827) * feat: generalize ConsensusDataProvider for manual-seal * rename all generic type param `proof`/`PROOF` to `P` * rename a missing thing * Update client/consensus/manual-seal/src/consensus.rs Co-authored-by: Davide Galassi * Update client/consensus/manual-seal/src/consensus/babe.rs Co-authored-by: Davide Galassi * Update client/consensus/manual-seal/src/consensus/aura.rs Co-authored-by: Davide Galassi Co-authored-by: Davide Galassi --- client/consensus/manual-seal/src/consensus.rs | 4 ++++ .../manual-seal/src/consensus/aura.rs | 11 +++++---- .../manual-seal/src/consensus/babe.rs | 21 ++++++++++++---- client/consensus/manual-seal/src/lib.rs | 24 +++++++++++-------- .../consensus/manual-seal/src/seal_block.rs | 14 ++++++----- 5 files changed, 49 insertions(+), 25 deletions(-) diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index dfd3730fd3427..b5dfc3d809c13 
100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -33,6 +33,9 @@ pub trait ConsensusDataProvider: Send + Sync { /// Block import transaction type type Transaction; + /// The proof type. + type Proof; + /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; @@ -42,5 +45,6 @@ pub trait ConsensusDataProvider: Send + Sync { parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, + proof: Self::Proof, ) -> Result<(), Error>; } diff --git a/client/consensus/manual-seal/src/consensus/aura.rs b/client/consensus/manual-seal/src/consensus/aura.rs index 7b5d6720562be..065b78732cdc3 100644 --- a/client/consensus/manual-seal/src/consensus/aura.rs +++ b/client/consensus/manual-seal/src/consensus/aura.rs @@ -35,14 +35,14 @@ use sp_timestamp::TimestampInherentData; use std::{marker::PhantomData, sync::Arc}; /// Consensus data provider for Aura. -pub struct AuraConsensusDataProvider { +pub struct AuraConsensusDataProvider { // slot duration slot_duration: SlotDuration, // phantom data for required generics - _phantom: PhantomData<(B, C)>, + _phantom: PhantomData<(B, C, P)>, } -impl AuraConsensusDataProvider +impl AuraConsensusDataProvider where B: BlockT, C: AuxStore + ProvideRuntimeApi + UsageProvider, @@ -58,7 +58,7 @@ where } } -impl ConsensusDataProvider for AuraConsensusDataProvider +impl ConsensusDataProvider for AuraConsensusDataProvider where B: BlockT, C: AuxStore @@ -67,8 +67,10 @@ where + UsageProvider + ProvideRuntimeApi, C::Api: AuraApi, + P: Send + Sync, { type Transaction = TransactionFor; + type Proof = P; fn create_digest( &self, @@ -92,6 +94,7 @@ where _parent: &B::Header, _params: &mut BlockImportParams, _inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { Ok(()) } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 3e7770cd982d2..cc73a3fa961ce 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -31,7 +31,7 @@ use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; use sp_keystore::SyncCryptoStorePtr; -use std::{borrow::Cow, sync::Arc}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; @@ -53,7 +53,7 @@ use sp_timestamp::TimestampInherentData; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. -pub struct BabeConsensusDataProvider { +pub struct BabeConsensusDataProvider { /// shared reference to keystore keystore: SyncCryptoStorePtr, @@ -68,6 +68,7 @@ pub struct BabeConsensusDataProvider { /// Authorities to be used for this babe chain. authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + _phantom: PhantomData
<P>
, } /// Verifier to be used for babe chains @@ -131,7 +132,7 @@ where } } -impl BabeConsensusDataProvider +impl BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -153,7 +154,14 @@ where let config = Config::get(&*client)?; - Ok(Self { config, client, keystore, epoch_changes, authorities }) + Ok(Self { + config, + client, + keystore, + epoch_changes, + authorities, + _phantom: Default::default(), + }) } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { @@ -181,7 +189,7 @@ where } } -impl ConsensusDataProvider for BabeConsensusDataProvider +impl ConsensusDataProvider for BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -190,8 +198,10 @@ where + UsageProvider + ProvideRuntimeApi, C::Api: BabeApi, + P: Send + Sync, { type Transaction = TransactionFor; + type Proof = P; fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents @@ -259,6 +269,7 @@ where parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { let slot = inherents .babe_inherent_data()? diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index a8d2634ade560..ba63666f3e46c 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -81,7 +81,7 @@ where } /// Params required to start the instant sealing authorship task. -pub struct ManualSealParams, TP, SC, CS, CIDP> { +pub struct ManualSealParams, TP, SC, CS, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -103,14 +103,14 @@ pub struct ManualSealParams, TP, SC, C /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option>>>, + Option>>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, } /// Params required to start the manual sealing authorship task. -pub struct InstantSealParams, TP, SC, CIDP> { +pub struct InstantSealParams, TP, SC, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -128,14 +128,14 @@ pub struct InstantSealParams, TP, SC, /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option>>>, + Option>>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, } /// Creates the background authorship task for the manual seal engine. -pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -145,7 +145,7 @@ pub async fn run_manual_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: ManualSealParams, + }: ManualSealParams, ) where B: BlockT + 'static, BI: BlockImport> @@ -155,12 +155,13 @@ pub async fn run_manual_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer>, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -198,7 +199,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. 
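// Minimal sketch of an implementor under the generalized trait (it mirrors
// the unit-test provider in the `mod tests` hunk further below; the name
// `MyProvider` and the elided imports are assumptions, not part of this
// patch):
struct MyProvider<C>(std::marker::PhantomData<C>);

impl<B, C> ConsensusDataProvider<B> for MyProvider<C>
where
    B: BlockT,
    C: ProvideRuntimeApi<B> + Send + Sync,
{
    type Transaction = TransactionFor<C, B>;
    // Providers that ignore the proposal proof can pick the unit type;
    // others may now consume the storage proof produced by the proposer.
    type Proof = ();

    fn create_digest(
        &self,
        _parent: &B::Header,
        _inherents: &InherentData,
    ) -> Result<Digest, Error> {
        Ok(Digest { logs: vec![] })
    }

    fn append_block_import(
        &self,
        _parent: &B::Header,
        params: &mut BlockImportParams<B, Self::Transaction>,
        _inherents: &InherentData,
        _proof: Self::Proof,
    ) -> Result<(), Error> {
        params.post_digests.push(DigestItem::Other(vec![1]));
        Ok(())
    }
}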
-pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -207,7 +208,7 @@ pub async fn run_instant_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport> @@ -217,11 +218,12 @@ pub async fn run_instant_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer>, SC: SelectChain + 'static, TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -275,6 +277,7 @@ mod tests { C: ProvideRuntimeApi + Send + Sync, { type Transaction = TransactionFor; + type Proof = (); fn create_digest( &self, @@ -289,6 +292,7 @@ mod tests { _parent: &B::Header, params: &mut BlockImportParams, _inherents: &InherentData, + _proof: Self::Proof, ) -> Result<(), Error> { params.post_digests.push(DigestItem::Other(vec![1])); Ok(()) diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 202b54fe5d0c5..32e3acf68506e 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -36,7 +36,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP, P> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -56,7 +56,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. pub consensus_data_provider: - Option<&'a dyn ConsensusDataProvider>>, + Option<&'a dyn ConsensusDataProvider>>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. 
@@ -64,7 +64,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -77,7 +77,7 @@ pub async fn seal_block( create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, ) where B: BlockT, BI: BlockImport> @@ -86,11 +86,12 @@ pub async fn seal_block( + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - E::Proposer: Proposer>, + E::Proposer: Proposer>, TP: TransactionPool, SC: SelectChain, TransactionFor: 'static, CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, { let future = async { if pool.status().ready == 0 && !create_empty { @@ -138,6 +139,7 @@ pub async fn seal_block( } let (header, body) = proposal.block.deconstruct(); + let proof = proposal.proof; let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); params.body = Some(body); params.finalized = finalize; @@ -147,7 +149,7 @@ pub async fn seal_block( )); if let Some(digest_provider) = digest_provider { - digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; + digest_provider.append_block_import(&parent, &mut params, &inherent_data, proof)?; } // Make sure we return the same post-hash that will be calculated when importing the block From 7d8e5a1e3979cb0fc61123d5cfeec974dfa25334 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sat, 30 Jul 2022 16:09:25 +0200 Subject: [PATCH 088/135] Auto-incremental CollectionId (#11796) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * autoincrementing CollectionId * fix * benchmarking fix * fmt * fix * update before checking * fmt * fix * fmt * commit * tests & fix * fix * commit * docs * safe math * unexpose function * benchmark * fmt * better naming * fix? 
* merge fixes * fmt * ".git/.scripts/bench-bot.sh" pallet dev pallet_uniques * wrong weight * Update frame/uniques/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/uniques/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * using substrate trait instead of num-traits * remove unnecessary trait * emit NextCollectionIdIncremented in do_create_collection * fix in benchmarks * check for event & group import * docs Co-authored-by: Sergej Sakač Co-authored-by: command-bot <> Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 4 +- frame/uniques/src/benchmarking.rs | 25 +++-- frame/uniques/src/functions.rs | 15 +++ frame/uniques/src/lib.rs | 54 ++++++++-- frame/uniques/src/tests.rs | 83 ++++++++++----- frame/uniques/src/weights.rs | 169 +++++++++++++++++------------- 6 files changed, 233 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34bf34c62c6fd..2b76f61df7912 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5020,9 +5020,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", "libm", diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 713393048f7e9..86247fb964ff5 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -42,13 +42,10 @@ fn create_collection, I: 'static>( let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - assert!(Uniques::::force_create( - SystemOrigin::Root.into(), - collection, - caller_lookup.clone(), - false, - ) - .is_ok()); + assert!( + Uniques::::force_create(SystemOrigin::Root.into(), caller_lookup.clone(), false,) + .is_ok() + ); (collection, caller, caller_lookup) } @@ -143,7 +140,7 @@ benchmarks_instance_pallet! { whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { collection, admin }; + let call = Call::::create { admin }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -152,7 +149,7 @@ benchmarks_instance_pallet! { force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, T::Helper::collection(0), caller_lookup, true) + }: _(SystemOrigin::Root, caller_lookup, true) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } @@ -408,6 +405,16 @@ benchmarks_instance_pallet! { }.into()); } + try_increment_id { + let (_, caller, _) = create_collection::(); + Uniques::::set_next_id(0); + }: _(SystemOrigin::Signed(caller.clone())) + verify { + assert_last_event::(Event::NextCollectionIdIncremented { + next_id: 1u32.into() + }.into()); + } + set_price { let (collection, caller, _) = create_collection::(); let (item, ..) 
= mint_item::(0); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 9e8d3301616df..dcbb1f27973d7 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -88,7 +88,12 @@ impl, I: 'static> Pallet { }, ); + let next_id = collection.saturating_add(1u32.into()); + CollectionAccount::::insert(&owner, &collection, ()); + NextCollectionId::::set(next_id); + + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); Self::deposit_event(event); Ok(()) } @@ -208,6 +213,16 @@ impl, I: 'static> Pallet { Ok(()) } + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn set_next_id(count: u32) { + NextCollectionId::::set(count.into()); + } + + #[cfg(test)] + pub fn get_next_id() -> T::CollectionId { + NextCollectionId::::get() + } + pub fn do_set_price( collection: T::CollectionId, item: T::ItemId, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 53f83605da90e..b25fb2f87ea97 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -51,7 +51,7 @@ use frame_support::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{Saturating, StaticLookup, Zero}, + traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, ArithmeticError, RuntimeDebug, }; use sp_std::prelude::*; @@ -92,7 +92,12 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// Identifier for the collection of item. - type CollectionId: Member + Parameter + MaxEncodedLen + Copy; + type CollectionId: Member + + Parameter + + MaxEncodedLen + + Copy + + Default + + AtLeast32BitUnsigned; /// The type used to identify a unique item within a collection. type ItemId: Member + Parameter + MaxEncodedLen + Copy; @@ -266,6 +271,12 @@ pub mod pallet { pub(super) type CollectionMaxSupply, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; + #[pallet::storage] + /// Stores the `CollectionId` that is going to be used for the next collection. + /// This gets incremented by 1 whenever a new collection is created. + pub(super) type NextCollectionId, I: 'static = ()> = + StorageValue<_, T::CollectionId, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -357,6 +368,8 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// Event gets emmited when the `NextCollectionId` gets incremented. + NextCollectionIdIncremented { next_id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { collection: T::CollectionId, @@ -408,6 +421,10 @@ pub mod pallet { MaxSupplyAlreadySet, /// The provided max supply is less to the amount of items a collection already has. MaxSupplyTooSmall, + /// The `CollectionId` in `NextCollectionId` is not being used. + /// + /// This means that you can directly proceed to call `create`. + NextIdNotUsed, /// The given item ID is unknown. UnknownItem, /// Item is not for sale. @@ -439,7 +456,6 @@ pub mod pallet { /// `ItemDeposit` funds of sender are reserved. /// /// Parameters: - /// - `collection`: The identifier of the new collection. This must not be currently in use. /// - `admin`: The admin of this collection. The admin is the initial address of each /// member of the collection's admin team. 
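// Illustrative usage sketch (comment only, not part of this patch): with
// the auto-incremented id, collection creation no longer names an id, and
// the counter can be observed through the test-only helpers added in
// `functions.rs` above:
//
//     assert_eq!(Uniques::get_next_id(), 0);   // fresh chain
//     assert_ok!(Uniques::create(Origin::signed(1), /* admin */ 1));
//     assert_eq!(Uniques::get_next_id(), 1);   // bumped by do_create_collection
//     // `try_increment_id` succeeds only while the counter still points
//     // at an id that is already taken (otherwise `NextIdNotUsed`):
//     Uniques::set_next_id(0);
//     assert_ok!(Uniques::try_increment_id(Origin::signed(2)));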
/// @@ -449,9 +465,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, - collection: T::CollectionId, admin: ::Source, ) -> DispatchResult { + let collection = NextCollectionId::::get(); + let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -473,7 +490,6 @@ pub mod pallet { /// /// Unlike `create`, no funds are reserved. /// - /// - `collection`: The identifier of the new item. This must not be currently in use. /// - `owner`: The owner of this collection of items. The owner has full superuser /// permissions /// over this item, but may later change and configure the permissions using @@ -485,13 +501,14 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, - collection: T::CollectionId, owner: ::Source, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; + let collection = NextCollectionId::::get(); + Self::do_create_collection( collection, owner.clone(), @@ -502,6 +519,31 @@ pub mod pallet { ) } + /// Increments the `CollectionId` stored in `NextCollectionId`. + /// + /// This is only callable when the next `CollectionId` is already being + /// used for some other collection. + /// + /// The origin must be Signed and the sender must have sufficient funds + /// free. + /// + /// Emits `NextCollectionIdIncremented` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::try_increment_id())] + pub fn try_increment_id(origin: OriginFor) -> DispatchResult { + ensure_signed(origin)?; + ensure!( + Collection::::contains_key(NextCollectionId::::get()), + Error::::NextIdNotUsed + ); + + let next_id = NextCollectionId::::get().saturating_add(1u32.into()); + NextCollectionId::::set(next_id); + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); + Ok(()) + } + /// Destroy a collection of fungible items. 
/// /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 8b1d00d7ba0c7..bd3a2b032945e 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -92,12 +92,12 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); + assert_ok!(Uniques::force_create(Origin::root(), 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); @@ -108,7 +108,7 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::create(Origin::signed(1), 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); @@ -151,7 +151,7 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::create(Origin::signed(1), 1)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); @@ -162,7 +162,7 @@ fn destroy_with_bad_witness_should_not_work() { #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(Uniques::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); @@ -173,7 +173,7 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); @@ -188,7 +188,7 @@ fn transfer_should_work() { #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); @@ -205,7 +205,7 @@ fn freezing_should_work() { #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); @@ -230,7 +230,7 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + 
assert_ok!(Uniques::create(Origin::signed(1), 1)); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( Uniques::transfer_ownership(Origin::signed(1), 0, 2), @@ -275,7 +275,7 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); @@ -294,7 +294,7 @@ fn set_collection_metadata_should_work() { Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); // Cannot add metadata to unowned item assert_noop!( Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), @@ -354,7 +354,7 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( @@ -410,7 +410,7 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -455,7 +455,7 @@ fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -487,7 +487,7 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); @@ -521,7 +521,7 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_noop!( @@ -545,7 +545,7 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); @@ -560,7 +560,7 @@ fn approval_lifecycle_works() { #[test] fn cancel_approval_works() { 
new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -592,7 +592,7 @@ fn cancel_approval_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -620,7 +620,7 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -653,7 +653,7 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Uniques::set_collection_max_supply( @@ -695,15 +695,48 @@ fn max_supply_should_work() { }); } +#[test] +fn try_increment_id_works() { + new_test_ext().execute_with(|| { + // should fail because the next `CollectionId` is not being used. + assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); + + // create two collections & check for events. + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 2 })); + + // there are now two collections. + assert_eq!(Uniques::get_next_id(), 2); + + // reset the collections counter to test if the `try_increment_id` + // works. + Uniques::set_next_id(0); + assert_ok!(Uniques::try_increment_id(Origin::signed(2))); + + // `try_increment_id` should emit an event when successful. + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); + + // because reset, the collections count should be now 1 + assert_eq!(Uniques::get_next_id(), 1); + + // increment the collections count again. + assert_ok!(Uniques::try_increment_id(Origin::signed(2))); + // should fail because the next `CollectionId` is not being used. 
+ assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); + }); +} + #[test] fn set_price_should_work() { new_test_ext().execute_with(|| { let user_id = 1; - let collection_id = 0; + let collection_id: u32 = 0; let item_1 = 1; let item_2 = 2; - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); @@ -755,7 +788,7 @@ fn buy_item_should_work() { let user_1 = 1; let user_2 = 2; let user_3 = 3; - let collection_id = 0; + let collection_id: u32 = 0; let item_1 = 1; let item_2 = 2; let item_3 = 3; @@ -767,7 +800,7 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); + assert_ok!(Uniques::force_create(Origin::root(), user_1, true)); assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 7c8cb170b1b1d..75bb89f26b428 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` +//! DATE: 2022-07-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet // --steps=50 @@ -70,6 +70,7 @@ pub trait WeightInfo { fn cancel_approval() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; + fn try_increment_id() -> Weight; fn set_price() -> Weight; fn buy_item() -> Weight; } @@ -77,19 +78,21 @@ pub trait WeightInfo { /// Weights for pallet_uniques using the Substrate node and recommended hardware. 
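// How to read the regenerated figures below (a sketch, not part of the
// generated file): each call is costed as a flat execution time plus its
// declared storage operations. For the new `try_increment_id` entry
// (r:2 w:1) the charged weight works out to
//
//     19_233_000 + 2 * T::DbWeight::get().read + 1 * T::DbWeight::get().write
//
// with the read/write constants coming from the runtime's configured
// `RuntimeDbWeight`; the generated code below spells exactly this out via
// `saturating_add`.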
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (30_481_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (19_811_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -104,12 +107,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 17_000 + .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 17_000 + .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 17_000 + .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) @@ -122,7 +125,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) + (36_980_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -131,7 +134,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) + (38_771_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -140,7 +143,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) + (29_914_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -149,8 +152,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 16_000 + .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -159,26 +162,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) + (22_425_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) + (23_011_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) + (17_718_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) + (17_619_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -186,20 +189,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) + (25_869_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) + (18_058_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (22_583_000 as Weight) + (20_720_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -207,7 +210,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) + (41_808_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -215,69 +218,76 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) + (39_866_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) + (34_767_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) + (33_910_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 
as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) + (33_827_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (35_621_000 as Weight) + (31_998_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) + (23_607_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) + (23_341_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) + (21_969_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) + (20_355_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques NextCollectionId (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn try_increment_id() -> Weight { + (19_233_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) + (20_733_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -286,7 +296,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) + (40_798_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -294,19 +304,21 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (30_481_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (19_811_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: 
Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -321,12 +333,12 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 17_000 + .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 17_000 + .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 17_000 + .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) @@ -339,7 +351,7 @@ impl WeightInfo for () { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) + (36_980_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -348,7 +360,7 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) + (38_771_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -357,7 +369,7 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) + (29_914_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -366,8 +378,8 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 16_000 + .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -376,26 +388,26 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) + (22_425_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) + (23_011_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) + (17_718_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) + (17_619_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -403,20 +415,20 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) + (25_869_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) + (18_058_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (22_583_000 as Weight) + (20_720_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -424,7 +436,7 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) + (41_808_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -432,69 +444,76 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) + (39_866_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) + (34_767_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) + (33_910_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as 
Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) + (33_827_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (35_621_000 as Weight) + (31_998_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) + (23_607_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) + (23_341_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) + (21_969_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) + (20_355_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques NextCollectionId (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn try_increment_id() -> Weight { + (19_233_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) + (20_733_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -503,7 +522,7 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) + (40_798_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } From 4d598040e0369b02cd24e8e67eaf317d09f74f85 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 1 Aug 2022 11:44:56 +0100 Subject: [PATCH 089/135] remove unused staking trait bound (#11942) --- frame/nomination-pools/benchmarking/src/mock.rs | 8 -------- frame/nomination-pools/test-staking/src/mock.rs | 8 -------- frame/session/benchmarking/src/mock.rs | 10 ---------- frame/staking/src/mock.rs | 11 +---------- frame/staking/src/pallet/mod.rs | 4 ++-- 5 files changed, 3 insertions(+), 38 deletions(-) diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 1c74c301e562d..d239d4f072b80 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -168,14 +168,6 @@ impl pallet_nomination_pools::Config for Runtime { impl crate::Config for Runtime {} -impl frame_system::offchain::SendTransactionTypes for Runtime -where - Call: From, -{ 
- type OverarchingCall = Call; - type Extrinsic = UncheckedExtrinsic; -} - type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; frame_support::construct_runtime!( diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 852b2c319f376..055ba7b4b3c06 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -180,14 +180,6 @@ impl pallet_nomination_pools::Config for Runtime { type PalletId = PoolsPalletId; } -impl frame_system::offchain::SendTransactionTypes for Runtime -where - Call: From, -{ - type OverarchingCall = Call; - type Extrinsic = UncheckedExtrinsic; -} - type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index c777f2c56de3a..2181493f72947 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -144,16 +144,6 @@ parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } -pub type Extrinsic = sp_runtime::testing::TestXt; - -impl frame_system::offchain::SendTransactionTypes for Test -where - Call: From, -{ - type OverarchingCall = Call; - type Extrinsic = Extrinsic; -} - pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 70d00c2b648d8..d9dc97f9c1127 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -33,7 +33,7 @@ use sp_core::H256; use sp_io; use sp_runtime::{ curve::PiecewiseLinear, - testing::{Header, TestXt, UintAuthorityId}, + testing::{Header, UintAuthorityId}, traits::{IdentityLookup, Zero}, }; use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; @@ -304,15 +304,6 @@ impl crate::pallet::pallet::Config for Test { type WeightInfo = (); } -impl frame_system::offchain::SendTransactionTypes for Test -where - Call: From, -{ - type OverarchingCall = Call; - type Extrinsic = Extrinsic; -} - -pub type Extrinsic = TestXt; pub(crate) type StakingCall = crate::Call; pub(crate) type TestRuntimeCall = ::Call; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index c999020c28167..4ce96ab68b11a 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -27,7 +27,7 @@ use frame_support::{ }, weights::Weight, }; -use frame_system::{ensure_root, ensure_signed, offchain::SendTransactionTypes, pallet_prelude::*}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, Perbill, Percent, @@ -73,7 +73,7 @@ pub mod pallet { } #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config { /// The staking balance. 
 		type Currency: LockableCurrency<
 			Self::AccountId,

From 6401300a2ff7ce013385f2c61fc9a3798bd4272d Mon Sep 17 00:00:00 2001
From: Keith Yeung
Date: Mon, 1 Aug 2022 22:24:22 +0800
Subject: [PATCH 090/135] Add and implement MaxEncodedLen to token traits
 (#11945)

* Add and implement MaxEncodedLen bounds to token traits

* cargo fmt

* Update UI test expectations

---
 frame/support/src/traits/tokens/misc.rs | 31 ++++++++++++++-----
 .../pallet_ui/storage_info_unsatisfied.stderr | 2 +-
 .../storage_info_unsatisfied_nmap.stderr | 2 +-
 3 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs
index fbbef6c31822f..294d0e89c8b9e 100644
--- a/frame/support/src/traits/tokens/misc.rs
+++ b/frame/support/src/traits/tokens/misc.rs
@@ -17,7 +17,7 @@
 //! Miscellaneous types.
 
-use codec::{Decode, Encode, FullCodec};
+use codec::{Decode, Encode, FullCodec, MaxEncodedLen};
 use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero};
 use sp_core::RuntimeDebug;
 use sp_runtime::{ArithmeticError, DispatchError, TokenError};
@@ -116,7 +116,9 @@ pub enum ExistenceRequirement {
 }
 
 /// Status of funds.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)]
+#[derive(
+	PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
+)]
 pub enum BalanceStatus {
 	/// Funds are free, as corresponding to `free` item in Balances.
 	Free,
@@ -126,7 +128,7 @@ pub enum BalanceStatus {
 bitflags::bitflags! {
 	/// Reasons for moving funds out of an account.
-	#[derive(Encode, Decode)]
+	#[derive(Encode, Decode, MaxEncodedLen)]
 	pub struct WithdrawReasons: u8 {
 		/// In order to pay for (system) transaction costs.
 		const TRANSACTION_PAYMENT = 0b00000001;
@@ -161,16 +163,29 @@ impl WithdrawReasons {
 }
 
 /// Simple amalgamation trait to collect together properties for an AssetId under one roof.
-pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo {}
-impl<T: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo> AssetId for T {}
+pub trait AssetId:
+	FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo + MaxEncodedLen
+{
+}
+impl<T: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo + MaxEncodedLen> AssetId
+	for T
+{
+}
 
 /// Simple amalgamation trait to collect together properties for a Balance under one roof.
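Editor's note: both amalgamation traits are blanket-implemented, so nothing opts in by hand; any type that already derives the component traits, now including `MaxEncodedLen`, qualifies automatically, and that bound is what lets `StorageInfoTrait` put a compile-time cap on storage-key sizes (hence the updated UI-test expectations further down). A minimal sketch under that assumption, using a hypothetical id type:

use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;

// Deriving the component traits is all it takes for this hypothetical type to
// satisfy the `AssetId` amalgamation trait via the blanket impl above.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)]
pub struct CustomAssetId(pub u32);

The `Balance` amalgamation trait, introduced by the doc comment that closes the hunk above, gains the same bound next.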
pub trait Balance: - AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo + AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo + MaxEncodedLen { } -impl Balance - for T +impl< + T: AtLeast32BitUnsigned + + FullCodec + + Copy + + Default + + Debug + + scale_info::TypeInfo + + MaxEncodedLen, + > Balance for T { } diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index d87e6900197f5..c2ec8cf7f4d05 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 70 others + and 72 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index bc8e99cd65006..dbbc426de2906 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 70 others + and 72 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` From 457de070e4a847dc66bba6c20e03c14346fb5619 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 1 Aug 2022 20:56:19 +0200 Subject: [PATCH 091/135] Remove `remove_member_wrong_refund` from `phragmen` WeightInfo (#11952) * Remove 'remove_member_wrong_refund' from WeightInfo Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_elections_phragmen Co-authored-by: command-bot <> --- frame/elections-phragmen/src/weights.rs | 181 ++++++++++++------------ 1 file changed, 93 insertions(+), 88 deletions(-) diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 5ad986bf142a3..07ee7aa2012ad 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-08-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_elections_phragmen // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_elections_phragmen +// --chain=dev // --output=./frame/elections-phragmen/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,7 +56,6 @@ pub trait WeightInfo { fn renounce_candidacy_runners_up() -> Weight; fn remove_member_without_replacement() -> Weight; fn remove_member_with_replacement() -> Weight; - fn remove_member_wrong_refund() -> Weight; fn clean_defunct_voters(v: u32, d: u32, ) -> Weight; fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight; } @@ -67,10 +68,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (27_798_000 as Weight) - // Standard Error: 5_000 - .saturating_add((238_000 as Weight).saturating_mul(v as Weight)) + (26_143_000 as Weight) + // Standard Error: 23_000 + .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -79,10 +81,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (41_241_000 as Weight) - // Standard Error: 8_000 - .saturating_add((259_000 as Weight).saturating_mul(v as Weight)) + (40_431_000 as Weight) + // Standard Error: 6_000 + .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -91,35 +94,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (41_313_000 as Weight) - // Standard Error: 8_000 - .saturating_add((255_000 as Weight).saturating_mul(v as Weight)) + (40_188_000 as Weight) + // Standard Error: 6_000 + .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (39_218_000 as Weight) + (38_031_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) + /// The range of component `c` is `[1, 1000]`. 
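Editor's note: the per-component figures in this regenerated file are slopes from a linear fit over the benchmark samples, and the `Standard Error` comments record the fit's uncertainty, not a runtime cost. A hedged worked example using the new `vote_equal` numbers above, with the same assumed read/write stand-ins as the earlier sketch:

// Editor's sketch; the constants are assumptions, the other numbers come from the hunk.
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

fn vote_equal_weight(v: u64) -> u64 {
	26_143_000u64 // base time
		.saturating_add(297_000u64.saturating_mul(v)) // fitted slope per vote
		.saturating_add(READ.saturating_mul(5))
		.saturating_add(WRITE.saturating_mul(2))
}
// At the top of the declared range, v = 16:
// 26_143_000 + 4_752_000 + 125_000_000 + 200_000_000 = 355_895_000 weight units.

`submit_candidacy(c)`, whose range comment closes the hunk above, applies the same shape with a per-candidate slope.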
fn submit_candidacy(c: u32, ) -> Weight { - (41_348_000 as Weight) - // Standard Error: 1_000 - .saturating_add((112_000 as Weight).saturating_mul(c as Weight)) + (43_715_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (35_522_000 as Weight) + (47_882_000 as Weight) // Standard Error: 1_000 - .saturating_add((92_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -129,13 +135,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (47_887_000 as Weight) + (45_600_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (36_271_000 as Weight) + (34_959_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -143,34 +149,29 @@ impl WeightInfo for SubstrateWeight { fn remove_member_without_replacement() -> Weight { (2_000_000_000_000 as Weight) } - // Storage: Elections RunnersUp (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) // Storage: Council Prime (r:1 w:1) // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (55_024_000 as Weight) + (52_684_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } - // Storage: Elections RunnersUp (r:1 w:0) - fn remove_member_wrong_refund() -> Weight { - (13_089_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } - // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Candidates (r:1 w:0) - // Storage: Balances Locks (r:250 w:250) - // Storage: System Account (r:250 w:250) - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + // Storage: Balances Locks (r:5000 w:5000) + // Storage: System Account (r:5000 w:5000) + /// The range of component `v` is `[5000, 10000]`. + /// The range of component `d` is `[1, 5000]`. 
+ fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 40_000 - .saturating_add((51_848_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 38_000 - .saturating_add((537_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 65_000 + .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -178,21 +179,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) - // Storage: Elections Voting (r:502 w:0) + // Storage: Elections Voting (r:10001 w:0) // Storage: Council Proposals (r:1 w:0) // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:19 w:19) + /// The range of component `c` is `[1, 1000]`. + /// The range of component `v` is `[1, 10000]`. + /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_664_000 - .saturating_add((30_736_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 692_000 - .saturating_add((49_739_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 47_000 - .saturating_add((3_363_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + // Standard Error: 778_000 + .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(279 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) } @@ -205,10 +208,11 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (27_798_000 as Weight) - // Standard Error: 5_000 - .saturating_add((238_000 as Weight).saturating_mul(v as Weight)) + (26_143_000 as Weight) + // Standard Error: 23_000 + .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -217,10 +221,11 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. 
fn vote_more(v: u32, ) -> Weight { - (41_241_000 as Weight) - // Standard Error: 8_000 - .saturating_add((259_000 as Weight).saturating_mul(v as Weight)) + (40_431_000 as Weight) + // Standard Error: 6_000 + .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -229,35 +234,38 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (41_313_000 as Weight) - // Standard Error: 8_000 - .saturating_add((255_000 as Weight).saturating_mul(v as Weight)) + (40_188_000 as Weight) + // Standard Error: 6_000 + .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (39_218_000 as Weight) + (38_031_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) + /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (41_348_000 as Weight) - // Standard Error: 1_000 - .saturating_add((112_000 as Weight).saturating_mul(c as Weight)) + (43_715_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) + /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (35_522_000 as Weight) + (47_882_000 as Weight) // Standard Error: 1_000 - .saturating_add((92_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -267,13 +275,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (47_887_000 as Weight) + (45_600_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (36_271_000 as Weight) + (34_959_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -281,34 +289,29 @@ impl WeightInfo for () { fn remove_member_without_replacement() -> Weight { (2_000_000_000_000 as Weight) } - // Storage: Elections RunnersUp (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) // Storage: Council Prime (r:1 w:1) // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (55_024_000 as Weight) + (52_684_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } - // Storage: Elections RunnersUp (r:1 w:0) - fn remove_member_wrong_refund() -> Weight { - (13_089_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - } - // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) // Storage: Elections Candidates (r:1 w:0) - // Storage: Balances Locks (r:250 w:250) - // Storage: System Account (r:250 w:250) - fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { + // Storage: Balances Locks (r:5000 w:5000) + // Storage: System Account (r:5000 w:5000) + /// The range of component `v` is `[5000, 10000]`. + /// The range of component `d` is `[1, 5000]`. + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 40_000 - .saturating_add((51_848_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 38_000 - .saturating_add((537_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 65_000 + .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -316,21 +319,23 @@ impl WeightInfo for () { // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) - // Storage: Elections Voting (r:502 w:0) + // Storage: Elections Voting (r:10001 w:0) // Storage: Council Proposals (r:1 w:0) // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:2 w:2) + // Storage: System Account (r:19 w:19) + /// The range of component `c` is `[1, 1000]`. + /// The range of component `v` is `[1, 10000]`. 
+	/// The range of component `e` is `[10000, 160000]`.
 	fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight {
 		(0 as Weight)
-			// Standard Error: 1_664_000
-			.saturating_add((30_736_000 as Weight).saturating_mul(c as Weight))
-			// Standard Error: 692_000
-			.saturating_add((49_739_000 as Weight).saturating_mul(v as Weight))
-			// Standard Error: 47_000
-			.saturating_add((3_363_000 as Weight).saturating_mul(e as Weight))
-			.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight)))
+			// Standard Error: 778_000
+			.saturating_add((81_049_000 as Weight).saturating_mul(v as Weight))
+			// Standard Error: 51_000
+			.saturating_add((4_420_000 as Weight).saturating_mul(e as Weight))
+			.saturating_add(RocksDbWeight::get().reads(279 as Weight))
+			.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(c as Weight)))
 			.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight)))
 			.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight)))
 	}
 }

From a5fe1dc09fb047c267d476c8bbe058594325aae3 Mon Sep 17 00:00:00 2001
From: lisa-parity <92225469+lisa-parity@users.noreply.github.com>
Date: Mon, 1 Aug 2022 11:58:42 -0700
Subject: [PATCH 092/135] Update the link to the runtime versioning topic
 (#11957)

---
 bin/node-template/runtime/src/lib.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs
index a4382cc9038ce..88fc86db02ef9 100644
--- a/bin/node-template/runtime/src/lib.rs
+++ b/bin/node-template/runtime/src/lib.rs
@@ -89,8 +89,8 @@ pub mod opaque {
 	}
 }
 
-// To learn more about runtime versioning and what each of the following value means:
-// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning
+// To learn more about runtime versioning, see:
+// https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning
 #[sp_version::runtime_version]
 pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("node-template"),

From 84cc128a6edc1c87b68954e6d64407ee36be45c1 Mon Sep 17 00:00:00 2001
From: Niklas Adolfsson
Date: Tue, 2 Aug 2022 09:06:36 +0200
Subject: [PATCH 093/135] fix(rpc middleware): fix `is_error` bug (#11951)

* fix(rpc middleware): fix `is_error` bug

* Update client/rpc-servers/src/middleware.rs

---
 client/rpc-servers/src/middleware.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs
index 5b2ee4bedb7dd..cb9918268f4de 100644
--- a/client/rpc-servers/src/middleware.rs
+++ b/client/rpc-servers/src/middleware.rs
@@ -176,7 +176,9 @@ impl Middleware for RpcMiddleware {
 			.with_label_values(&[
 				self.transport_label,
 				name,
-				if success { "true" } else { "false" },
+				// the label is "is_error", so `success` must be inverted for the
+				// metric to be registered correctly.
+				if success { "false" } else { "true" },
 			])
 			.inc();
 	}

From 938c0d9363131d862bcc04d2cf913ce9cb9dd5fe Mon Sep 17 00:00:00 2001
From: bernardo
Date: Tue, 2 Aug 2022 13:43:57 +0100
Subject: [PATCH 094/135] benchmarking f32 step calculation (#11890)

* benchmark steps with f32

* divide by self.steps - 1

* avoid steps <= 1

* Fix step size

Signed-off-by: Oliver Tale-Yazdi

* Fix 'steps' print and print number of args

Signed-off-by: Oliver Tale-Yazdi

* Add Test

Signed-off-by: Oliver Tale-Yazdi

* fix comments

* Remove unneeded comment

Signed-off-by: Oliver Tale-Yazdi
Co-authored-by: Oliver Tale-Yazdi
Co-authored-by: parity-processbot <>
---
 bin/node/cli/tests/benchmark_pallet_works.rs | 49 +++++++++++++++++++
 .../benchmarking-cli/src/pallet/command.rs | 27 ++++++----
 2 files changed, 67 insertions(+), 9 deletions(-)
 create mode 100644 bin/node/cli/tests/benchmark_pallet_works.rs

diff --git a/bin/node/cli/tests/benchmark_pallet_works.rs b/bin/node/cli/tests/benchmark_pallet_works.rs
new file mode 100644
index 0000000000000..bf29c0e308bcb
--- /dev/null
+++ b/bin/node/cli/tests/benchmark_pallet_works.rs
@@ -0,0 +1,49 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use assert_cmd::cargo::cargo_bin;
+use std::process::Command;
+
+pub mod common;
+
+/// `benchmark pallet` works for the different combinations of `steps` and `repeat`.
+#[test]
+fn benchmark_pallet_works() {
+	// Some invalid combinations:
+	benchmark_pallet(0, 10, false);
+	benchmark_pallet(1, 10, false);
+	// ... and some valid:
+	benchmark_pallet(2, 1, true);
+	benchmark_pallet(50, 20, true);
+	benchmark_pallet(20, 50, true);
+}
+
+fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) {
+	let output = Command::new(cargo_bin("substrate"))
+		.args(["benchmark", "pallet", "--dev"])
+		// Use the `addition` benchmark since it is the fastest.
+		.args(["--pallet", "frame-benchmarking", "--extrinsic", "addition"])
+		.args(["--steps", &format!("{}", steps), "--repeat", &format!("{}", repeat)])
+		.output()
+		.unwrap();
+
+	if output.status.success() != should_work {
+		let log = String::from_utf8_lossy(&output.stderr).to_string();
+		panic!("Test failed:\n{}", log);
+	}
+}

diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs
index fae5a4494cdc4..fb6f1393650ad 100644
--- a/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -236,14 +236,22 @@ impl PalletCmd {
 					let lowest = self.lowest_range_values.get(idx).cloned().unwrap_or(*low);
 					let highest = self.highest_range_values.get(idx).cloned().unwrap_or(*high);
-					let diff = highest - lowest;
+					let diff =
+						highest.checked_sub(lowest).ok_or("`low` cannot be higher than `high`")?;
 
-					// Create up to `STEPS` steps for that component between high and low.
-					let step_size = (diff / self.steps).max(1);
-					let num_of_steps = diff / step_size + 1;
-					for s in 0..num_of_steps {
+					// The slope logic needs at least two points
+					// to compute a slope.
+					if self.steps < 2 {
+						return Err("`steps` must be at least 2.".into())
+					}
+
+					let step_size = (diff as f32 / (self.steps - 1) as f32).max(0.0);
+
+					for s in 0..self.steps {
 						// This is the value we will be testing for component `name`
-						let component_value = lowest + step_size * s;
+						let component_value = ((lowest as f32 + step_size * s as f32) as u32)
+							.min(highest)
+							.max(lowest);
 
 						// Select the max value for all the other components.
 						let c: Vec<(BenchmarkParameter, u32)> = components
@@ -364,13 +372,14 @@ impl PalletCmd {
 				if elapsed >= time::Duration::from_secs(5) {
 					timer = time::SystemTime::now();
 					log::info!(
-						"Running Benchmark: {}.{} {}/{} {}/{}",
+						"Running Benchmark: {}.{}({} args) {}/{} {}/{}",
 						String::from_utf8(pallet.clone())
 							.expect("Encoded from String; qed"),
 						String::from_utf8(extrinsic.clone())
 							.expect("Encoded from String; qed"),
-						s + 1, // s starts at 0. todo show step
-						self.steps,
+						components.len(),
+						s + 1, // s starts at 0.
+						all_components.len(),
 						r + 1,
 						self.external_repeat,
 					);
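A quick worked example of the new step arithmetic above (an editor's sketch; the clamping mirrors the hunk, the sample numbers are invented): with `lowest = 0`, `highest = 10` and `steps = 3`, `diff = 10` and `step_size = 10.0 / 2.0 = 5.0`, so the sampled component values are 0, 5 and 10, that is, both endpoints plus one interior point, which is exactly what the slope fit needs. The old integer formula could miss the upper endpoint entirely: the same inputs gave `step_size = 3` and samples 0, 3, 6, 9.

// Editor's sketch of the sampling performed above; names are local to the example.
fn sample_points(lowest: u32, highest: u32, steps: u32) -> Vec<u32> {
	assert!(steps >= 2, "`steps` must be at least 2.");
	let diff = highest.checked_sub(lowest).expect("`low` cannot be higher than `high`");
	let step_size = (diff as f32 / (steps - 1) as f32).max(0.0);
	(0..steps)
		.map(|s| ((lowest as f32 + step_size * s as f32) as u32).min(highest).max(lowest))
		.collect()
}
// sample_points(0, 10, 3) == vec![0, 5, 10]

The new `benchmark_pallet_works` integration test exercises exactly these rules through the CLI, rejecting `--steps` values of 0 and 1 and accepting anything from 2 upwards.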
From 028ee356720d9b8de9995acae93de68af4ef8ece Mon Sep 17 00:00:00 2001
From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
Date: Tue, 2 Aug 2022 15:01:15 +0200
Subject: [PATCH 095/135] [ci] chores: remove cargo install nextest (#11961)

---
 scripts/ci/gitlab/pipeline/test.yml | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml
index b2daf5bada24f..7a9f57e9a7734 100644
--- a/scripts/ci/gitlab/pipeline/test.yml
+++ b/scripts/ci/gitlab/pipeline/test.yml
@@ -213,8 +213,6 @@ test-linux-stable:
   parallel: 3
   script:
     - rusty-cachier snapshot create
-    # TODO: remove when current paritytech/ci-linux:staging (with rust stable 1.62) is switched to production
-    - time cargo install cargo-nextest
    # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests
    # tests are partitioned by nextest and executed in parallel on $CI_NODE_TOTAL runners
    # node-cli is excluded until https://github.com/paritytech/substrate/issues/11321 fixed
@@ -373,8 +371,6 @@ test-wasmer-sandbox:
   parallel: 3
   script:
     - rusty-cachier snapshot create
-    # TODO: remove when current paritytech/ci-linux:staging (with rust stable 1.62) is switched to production
-    - cargo install cargo-nextest
    - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
    - time cargo nextest run --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL}
    - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi

From 43427bd3dd9b04e1830c497df47042a7f5a7aa6e Mon Sep 17 00:00:00 2001
From: Nikos Kontakis
Date: Tue, 2 Aug 2022 17:25:52 +0200
Subject: [PATCH 096/135] Rename `node-runtime` to `node-kitchensink-runtime`
 (#11930)

* Rename node-runtime to kitchensink-runtime

* Undo md formatting
---
 Cargo.lock | 198 +++++++++---------
 bin/node/bench/Cargo.toml | 2 +-
 bin/node/bench/src/import.rs | 4 +-
 bin/node/cli/Cargo.toml | 6 +-
 bin/node/cli/benches/block_production.rs | 6 +-
 bin/node/cli/benches/transaction_pool.rs | 2 +-
 bin/node/cli/src/benchmarking.rs | 2 +-
 bin/node/cli/src/chain_spec.rs | 6 +-
 bin/node/cli/src/command.rs | 4 +-
 bin/node/cli/src/service.rs | 42 ++--
 bin/node/executor/Cargo.toml | 2 +-
 bin/node/executor/benches/bench.rs | 12 +-
 bin/node/executor/src/lib.rs | 4 +-
 bin/node/executor/tests/basic.rs | 6 +-
 bin/node/executor/tests/common.rs | 12 +-
 bin/node/executor/tests/fees.rs | 4 +-
 bin/node/executor/tests/submit_transaction.rs | 2 +-
 bin/node/runtime/Cargo.toml | 2 +-
 bin/node/testing/Cargo.toml | 2 +-
 bin/node/testing/src/bench.rs | 15 +-
 bin/node/testing/src/client.rs | 8 +-
 bin/node/testing/src/genesis.rs | 2 +-
 bin/node/testing/src/keyring.rs | 2 +-
 bin/utils/subkey/src/main.rs | 2 +-
 docs/README.adoc | 2 +-
 scripts/ci/gitlab/pipeline/test.yml | 6 +-
 .../generate-bags/node-runtime/Cargo.toml | 4 +-
 .../generate-bags/node-runtime/src/main.rs | 7 +-
 utils/wasm-builder/src/lib.rs | 2 +-
 29 files changed, 188 insertions(+), 180 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 2b76f61df7912..733c6d73d2709 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3299,6 +3299,100 @@ dependencies = [
 "tiny-keccak",
]

+[[package]]
+name = "kitchensink-runtime"
+version = "3.0.0-dev"
+dependencies = [
+ "frame-benchmarking",
+ "frame-election-provider-support",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "frame-system-benchmarking",
+ "frame-system-rpc-runtime-api",
+ "frame-try-runtime",
+ "hex-literal",
+ "log",
+ "node-primitives",
+ "pallet-alliance",
+ "pallet-asset-tx-payment",
+ "pallet-assets",
+ "pallet-authority-discovery",
+ "pallet-authorship",
+ "pallet-babe",
+ "pallet-bags-list",
+ "pallet-balances",
+ "pallet-bounties",
+ "pallet-child-bounties",
+ "pallet-collective",
+ "pallet-contracts",
+ "pallet-contracts-primitives",
+ "pallet-contracts-rpc-runtime-api",
+ "pallet-conviction-voting",
+ "pallet-democracy",
+ "pallet-election-provider-multi-phase",
+ "pallet-election-provider-support-benchmarking",
+ "pallet-elections-phragmen",
+ "pallet-gilt",
+ "pallet-grandpa",
+ "pallet-identity",
+ "pallet-im-online",
+ "pallet-indices",
+ "pallet-lottery",
+ "pallet-membership",
+ "pallet-mmr",
+ "pallet-multisig",
+ "pallet-nomination-pools",
+ "pallet-nomination-pools-benchmarking",
+ "pallet-nomination-pools-runtime-api",
+ "pallet-offences",
+ "pallet-offences-benchmarking",
+ "pallet-preimage",
+ "pallet-proxy",
+ "pallet-randomness-collective-flip",
+ "pallet-ranked-collective",
+ "pallet-recovery",
+ "pallet-referenda",
+ "pallet-remark",
+ "pallet-scheduler",
+ "pallet-session",
+ "pallet-session-benchmarking",
+ "pallet-society",
+ "pallet-staking",
+ "pallet-staking-reward-curve",
+ "pallet-state-trie-migration",
+ "pallet-sudo",
+ "pallet-timestamp",
+ "pallet-tips",
+ "pallet-transaction-payment",
"pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", + "pallet-treasury", + "pallet-uniques", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-sandbox", + "sp-session", + "sp-staking", + "sp-std", + "sp-transaction-pool", + "sp-version", + "static_assertions", + "substrate-wasm-builder", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -4511,12 +4605,12 @@ dependencies = [ "futures", "hash-db", "hex", + "kitchensink-runtime", "kvdb", "kvdb-rocksdb", "lazy_static", "log", "node-primitives", - "node-runtime", "node-testing", "parity-db", "parity-util-mem", @@ -4553,13 +4647,13 @@ dependencies = [ "futures", "hex-literal", "jsonrpsee", + "kitchensink-runtime", "log", "nix 0.23.1", "node-executor", "node-inspect", "node-primitives", "node-rpc", - "node-runtime", "pallet-asset-tx-payment", "pallet-balances", "pallet-im-online", @@ -4631,8 +4725,8 @@ dependencies = [ "frame-support", "frame-system", "futures", + "kitchensink-runtime", "node-primitives", - "node-runtime", "node-testing", "pallet-balances", "pallet-contracts", @@ -4716,107 +4810,13 @@ dependencies = [ "substrate-state-trie-migration-rpc", ] -[[package]] -name = "node-runtime" -version = "3.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "node-primitives", - "pallet-alliance", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-bounties", - "pallet-child-bounties", - "pallet-collective", - "pallet-contracts", - "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", - "pallet-conviction-voting", - "pallet-democracy", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - "pallet-elections-phragmen", - "pallet-gilt", - "pallet-grandpa", - "pallet-identity", - "pallet-im-online", - "pallet-indices", - "pallet-lottery", - "pallet-membership", - "pallet-mmr", - "pallet-multisig", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-nomination-pools-runtime-api", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-preimage", - "pallet-proxy", - "pallet-randomness-collective-flip", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-remark", - "pallet-scheduler", - "pallet-session", - "pallet-session-benchmarking", - "pallet-society", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-state-trie-migration", - "pallet-sudo", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-transaction-storage", - "pallet-treasury", - "pallet-uniques", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-core", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-sandbox", - "sp-session", - "sp-staking", - "sp-std", - "sp-transaction-pool", - "sp-version", - "static_assertions", - "substrate-wasm-builder", -] - 
[[package]] name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ "clap 3.1.18", "generate-bags", - "node-runtime", + "kitchensink-runtime", ] [[package]] @@ -4905,10 +4905,10 @@ dependencies = [ "frame-system", "fs_extra", "futures", + "kitchensink-runtime", "log", "node-executor", "node-primitives", - "node-runtime", "pallet-asset-tx-payment", "pallet-transaction-payment", "parity-scale-codec", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 90c325b8e5b32..4d090c71a72e9 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -13,7 +13,7 @@ clap = { version = "3.1.18", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "3.0.0-dev", path = "../testing" } -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index b9229fbd5331d..47f630eb68700 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -148,13 +148,13 @@ impl core::Benchmark for ImportBenchmark { // the transaction fee into the treasury // - extrinsic success assert_eq!( - node_runtime::System::events().len(), + kitchensink_runtime::System::events().len(), (self.block.extrinsics.len() - 1) * 8 + 1, ); }, BlockType::Noop => { assert_eq!( - node_runtime::System::events().len(), + kitchensink_runtime::System::events().len(), // should be 2 per signed extrinsic + 1 per unsigned // we have 1 unsigned and the rest are signed in the block // those 2 events per signed are: diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index b2e24f0b33189..9dde225c0d6fb 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -87,7 +87,7 @@ pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transa pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } node-rpc = { version = "3.0.0-dev", path = "../rpc" } node-primitives = { version = "2.0.0", path = "../primitives" } node-executor = { version = "3.0.0-dev", path = "../executor" } @@ -159,10 +159,10 @@ cli = [ "substrate-build-script-utils", "try-runtime-cli", ] -runtime-benchmarks = ["node-runtime/runtime-benchmarks", "frame-benchmarking-cli"] +runtime-benchmarks = ["kitchensink-runtime/runtime-benchmarks", "frame-benchmarking-cli"] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-runtime/try-runtime", "try-runtime-cli"] +try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli"] [[bench]] name = "transaction_pool" diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index ad16ba8e4072b..6d269ccaac271 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -18,8 +18,8 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use kitchensink_runtime::{constants::currency::*, BalancesCall}; use node_cli::service::{create_extrinsic, FullClient}; -use node_runtime::{constants::currency::*, BalancesCall}; use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_consensus::{ @@ -121,9 +121,9 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { } fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { - node_runtime::UncheckedExtrinsic { + kitchensink_runtime::UncheckedExtrinsic { signature: None, - function: node_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), + function: kitchensink_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), } .into() } diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index d88c3a65b6138..580a10d6a6678 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -20,9 +20,9 @@ use std::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use futures::{future, StreamExt}; +use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall}; use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; use node_primitives::AccountId; -use node_runtime::{constants::currency::*, BalancesCall, SudoCall}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ diff --git a/bin/node/cli/src/benchmarking.rs b/bin/node/cli/src/benchmarking.rs index 52657016c046a..19bd1660a4dd9 100644 --- a/bin/node/cli/src/benchmarking.rs +++ b/bin/node/cli/src/benchmarking.rs @@ -22,8 +22,8 @@ use crate::service::{create_extrinsic, FullClient}; +use kitchensink_runtime::{BalancesCall, SystemCall}; use node_primitives::{AccountId, Balance}; -use node_runtime::{BalancesCall, SystemCall}; use sc_cli::Result; use sp_inherents::{InherentData, InherentDataProvider}; use sp_keyring::Sr25519Keyring; diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 47c3634aa00df..77e2f73dd6e18 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -20,7 +20,7 @@ use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, MaxNominations, NominationPoolsConfig, SessionConfig, @@ -40,8 +40,8 @@ use sp_runtime::{ Perbill, }; +pub use kitchensink_runtime::GenesisConfig; pub use node_primitives::{AccountId, Balance, Signature}; -pub use node_runtime::GenesisConfig; type AccountPublic = ::Signer; @@ -343,7 +343,7 @@ pub fn testnet_genesis( sudo: SudoConfig { key: Some(root_key) }, babe: BabeConfig { authorities: vec![], - epoch_config: Some(node_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: 
Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), }, im_online: ImOnlineConfig { keys: vec![] }, authority_discovery: AuthorityDiscoveryConfig { keys: vec![] }, diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index df3d5a891e2ab..85e5415dbe139 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -23,9 +23,9 @@ use crate::{ Cli, Subcommand, }; use frame_benchmarking_cli::*; +use kitchensink_runtime::{ExistentialDeposit, RuntimeApi}; use node_executor::ExecutorDispatch; use node_primitives::Block; -use node_runtime::{ExistentialDeposit, RuntimeApi}; use sc_cli::{ChainSpec, Result, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; use sp_keyring::Sr25519Keyring; @@ -75,7 +75,7 @@ impl SubstrateCli for Cli { } fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &node_runtime::VERSION + &kitchensink_runtime::VERSION } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index bff4be88002fb..e0644f462cf20 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -23,9 +23,9 @@ use codec::Encode; use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; +use kitchensink_runtime::RuntimeApi; use node_executor::ExecutorDispatch; use node_primitives::Block; -use node_runtime::RuntimeApi; use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; @@ -68,41 +68,43 @@ pub fn fetch_nonce(client: &FullClient, account: sp_core::sr25519::Pair) -> u32 pub fn create_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - function: impl Into, + function: impl Into, nonce: Option, -) -> node_runtime::UncheckedExtrinsic { +) -> kitchensink_runtime::UncheckedExtrinsic { let function = function.into(); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); let best_hash = client.chain_info().best_hash; let best_block = client.chain_info().best_number; let nonce = nonce.unwrap_or_else(|| fetch_nonce(client, sender.clone())); - let period = node_runtime::BlockHashCount::get() + let period = kitchensink_runtime::BlockHashCount::get() .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: node_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( + let extra: kitchensink_runtime::SignedExtra = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( period, best_block.saturated_into(), )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), ); - let raw_payload = node_runtime::SignedPayload::from_raw( + let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), extra.clone(), ( (), - node_runtime::VERSION.spec_version, - node_runtime::VERSION.transaction_version, + kitchensink_runtime::VERSION.spec_version, + kitchensink_runtime::VERSION.transaction_version, genesis_hash, best_hash, (), @@ 
-112,10 +114,10 @@ pub fn create_extrinsic( ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); - node_runtime::UncheckedExtrinsic::new_signed( + kitchensink_runtime::UncheckedExtrinsic::new_signed( function, sp_runtime::AccountId32::from(sender.public()).into(), - node_runtime::Signature::Sr25519(signature), + kitchensink_runtime::Signature::Sr25519(signature), extra, ) } @@ -565,11 +567,11 @@ pub fn new_full( mod tests { use crate::service::{new_full_base, NewFullBase}; use codec::Encode; - use node_primitives::{Block, DigestItem, Signature}; - use node_runtime::{ + use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, Address, BalancesCall, Call, UncheckedExtrinsic, }; + use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 6143ea3bd8c42..71865783da8ac 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } scale-info = { version = "2.1.1", features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "3.0.0-dev", path = "../runtime" } +kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 61e2d1b053012..a1d31a5a966db 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -18,12 +18,12 @@ use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; -use node_executor::ExecutorDispatch; -use node_primitives::{BlockNumber, Hash}; -use node_runtime::{ +use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, }; +use node_executor::ExecutorDispatch; +use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; #[cfg(feature = "wasmtime")] use sc_executor::WasmtimeInstantiationStrategy; @@ -41,7 +41,7 @@ criterion_main!(benches); /// The wasm runtime code. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect( + kitchensink_runtime::WASM_BINARY.expect( "Development wasm binary is not available. 
diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml
index 6143ea3bd8c42..71865783da8ac 100644
--- a/bin/node/executor/Cargo.toml
+++ b/bin/node/executor/Cargo.toml
@@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" }
 scale-info = { version = "2.1.1", features = ["derive"] }
 frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" }
 node-primitives = { version = "2.0.0", path = "../primitives" }
-node-runtime = { version = "3.0.0-dev", path = "../runtime" }
+kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" }
 sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" }
 sp-core = { version = "6.0.0", path = "../../../primitives/core" }
 sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" }
diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs
index 61e2d1b053012..a1d31a5a966db 100644
--- a/bin/node/executor/benches/bench.rs
+++ b/bin/node/executor/benches/bench.rs
@@ -18,12 +18,12 @@
 use codec::{Decode, Encode};
 use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
 use frame_support::Hashable;
-use node_executor::ExecutorDispatch;
-use node_primitives::{BlockNumber, Hash};
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header,
 	UncheckedExtrinsic,
 };
+use node_executor::ExecutorDispatch;
+use node_primitives::{BlockNumber, Hash};
 use node_testing::keyring::*;
 #[cfg(feature = "wasmtime")]
 use sc_executor::WasmtimeInstantiationStrategy;
@@ -41,7 +41,7 @@ criterion_main!(benches);
 
 /// The wasm runtime code.
 pub fn compact_code_unwrap() -> &'static [u8] {
-	node_runtime::WASM_BINARY.expect(
+	kitchensink_runtime::WASM_BINARY.expect(
 		"Development wasm binary is not available. Testing is only supported with the flag \
 		 disabled.",
 	)
@@ -49,9 +49,9 @@
 
 const GENESIS_HASH: [u8; 32] = [69u8; 32];
 
-const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version;
+const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version;
 
-const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version;
+const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version;
 
 const HEAP_PAGES: u64 = 20;
diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs
index 9f87c7d12623c..c4edf5ad22f47 100644
--- a/bin/node/executor/src/lib.rs
+++ b/bin/node/executor/src/lib.rs
@@ -28,10 +28,10 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
 	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
 
 	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		node_runtime::api::dispatch(method, data)
+		kitchensink_runtime::api::dispatch(method, data)
 	}
 
 	fn native_version() -> sc_executor::NativeVersion {
-		node_runtime::native_version()
+		kitchensink_runtime::native_version()
 	}
 }
diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs
index 31a9bd0a90496..27e848a281097 100644
--- a/bin/node/executor/tests/basic.rs
+++ b/bin/node/executor/tests/basic.rs
@@ -26,12 +26,12 @@ use sp_runtime::{
 	traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult,
 };
 
-use node_primitives::{Balance, Hash};
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
 	Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment,
 	UncheckedExtrinsic,
 };
+use node_primitives::{Balance, Hash};
 use node_testing::keyring::*;
 use wat;
 
@@ -44,7 +44,7 @@ use self::common::{sign, *};
 /// have to execute provided wasm code instead of the native equivalent. This trick is used to
 /// test code paths that differ between native and wasm versions.
 pub fn bloaty_code_unwrap() -> &'static [u8] {
-	node_runtime::WASM_BINARY_BLOATY.expect(
+	kitchensink_runtime::WASM_BINARY_BLOATY.expect(
 		"Development wasm binary is not available. \
 		 Testing is only supported with the flag disabled.",
 	)
diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs
index a2bb91056f474..407a1e09f8efb 100644
--- a/bin/node/executor/tests/common.rs
+++ b/bin/node/executor/tests/common.rs
@@ -35,12 +35,12 @@ use sp_runtime::{
 };
 use sp_state_machine::TestExternalities as CoreTestExternalities;
 
-use node_executor::ExecutorDispatch;
-use node_primitives::{BlockNumber, Hash};
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime,
 	UncheckedExtrinsic,
 };
+use node_executor::ExecutorDispatch;
+use node_primitives::{BlockNumber, Hash};
 use node_testing::keyring::*;
 use sp_externalities::Externalities;
 
@@ -69,7 +69,7 @@ impl AppCrypto for TestAuthorityId {
 /// making the binary slimmer. There is a convention to use compact version of the runtime
 /// as canonical.
 pub fn compact_code_unwrap() -> &'static [u8] {
-	node_runtime::WASM_BINARY.expect(
+	kitchensink_runtime::WASM_BINARY.expect(
 		"Development wasm binary is not available. Testing is only supported with the flag \
 		 disabled.",
 	)
@@ -77,9 +77,9 @@
 
 pub const GENESIS_HASH: [u8; 32] = [69u8; 32];
 
-pub const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version;
+pub const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version;
 
-pub const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version;
+pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version;
 
 pub type TestExternalities = CoreTestExternalities;
diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs
index e1550071d3006..008ed5f53927b 100644
--- a/bin/node/executor/tests/fees.rs
+++ b/bin/node/executor/tests/fees.rs
@@ -20,11 +20,11 @@ use frame_support::{
 	traits::Currency,
 	weights::{constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFee},
 };
-use node_primitives::Balance;
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
 	Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee,
 	TransactionPayment,
 };
+use node_primitives::Balance;
 use node_testing::keyring::*;
 use sp_core::NeverNativeValue;
 use sp_runtime::{traits::One, Perbill};
diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs
index 7df13a577006e..be43f3c78674f 100644
--- a/bin/node/executor/tests/submit_transaction.rs
+++ b/bin/node/executor/tests/submit_transaction.rs
@@ -17,7 +17,7 @@
 
 use codec::Decode;
 use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction};
-use node_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic};
+use kitchensink_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic};
 use sp_application_crypto::AppKey;
 use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt};
 use sp_keyring::sr25519::Keyring::Alice;
diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml
index d3138ce51de9c..10b15b6ec554d 100644
--- a/bin/node/runtime/Cargo.toml
+++ b/bin/node/runtime/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "node-runtime"
+name = "kitchensink-runtime"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2021"
diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml
index 7caf10366b48c..ed81301e45189 100644
--- a/bin/node/testing/Cargo.toml
+++ b/bin/node/testing/Cargo.toml
@@ -21,7 +21,7 @@ tempfile = "3.1.0"
 frame-system = { version = "4.0.0-dev", path = "../../../frame/system" }
 node-executor = { version = "3.0.0-dev", path = "../executor" }
 node-primitives = { version = "2.0.0", path = "../primitives" }
-node-runtime = { version = "3.0.0-dev", path = "../runtime" }
+kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" }
 pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment" }
 pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" }
 sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" }
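The dependency rename above is mechanical, but note that a downstream crate could also have bridged it without touching every use site. A hypothetical shim, not what this series does (the series updates call sites directly); it compiles only inside a crate that actually depends on `kitchensink-runtime`:

    // Hypothetical bridge after switching Cargo.toml to the renamed crate:
    // a single alias lets old `node_runtime::` paths keep compiling.
    use kitchensink_runtime as node_runtime;

    fn runtime_spec_version() -> u32 {
        // Old-style path, resolved through the alias above.
        node_runtime::VERSION.spec_version
    }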
diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs
index 00ce7f64bc3f0..18e979b95737f 100644
--- a/bin/node/testing/src/bench.rs
+++ b/bin/node/testing/src/bench.rs
@@ -34,11 +34,11 @@ use crate::{
 };
 use codec::{Decode, Encode};
 use futures::executor;
-use node_primitives::Block;
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod,
 	Signature, SystemCall, UncheckedExtrinsic,
 };
+use node_primitives::Block;
 use sc_block_builder::BlockBuilderProvider;
 use sc_client_api::{
 	execution_extensions::{ExecutionExtensions, ExecutionStrategies},
@@ -304,20 +304,21 @@ impl<'a> Iterator for BlockContentIterator<'a> {
 			CheckedExtrinsic {
 				signed: Some((
 					sender,
-					signed_extra(0, node_runtime::ExistentialDeposit::get() + 1),
+					signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1),
 				)),
 				function: match self.content.block_type {
 					BlockType::RandomTransfersKeepAlive =>
 						Call::Balances(BalancesCall::transfer_keep_alive {
 							dest: sp_runtime::MultiAddress::Id(receiver),
-							value: node_runtime::ExistentialDeposit::get() + 1,
+							value: kitchensink_runtime::ExistentialDeposit::get() + 1,
 						}),
 					BlockType::RandomTransfersReaping => {
 						Call::Balances(BalancesCall::transfer {
 							dest: sp_runtime::MultiAddress::Id(receiver),
 							// Transfer so that ending balance would be 1 less than existential
 							// deposit so that we kill the sender account.
-							value: 100 * DOLLARS - (node_runtime::ExistentialDeposit::get() - 1),
+							value: 100 * DOLLARS -
+								(kitchensink_runtime::ExistentialDeposit::get() - 1),
 						})
 					},
 					BlockType::Noop => Call::System(SystemCall::remark { remark: Vec::new() }),
@@ -592,9 +593,9 @@ impl BenchKeyring {
 	}
 
 	/// Generate genesis with accounts from this keyring endowed with some balance.
-	pub fn generate_genesis(&self) -> node_runtime::GenesisConfig {
+	pub fn generate_genesis(&self) -> kitchensink_runtime::GenesisConfig {
 		crate::genesis::config_endowed(
-			Some(node_runtime::wasm_binary_unwrap()),
+			Some(kitchensink_runtime::wasm_binary_unwrap()),
 			self.collect_account_ids(),
 		)
 	}
diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs
index 8cb98511098f1..590304bdd52a5 100644
--- a/bin/node/testing/src/client.rs
+++ b/bin/node/testing/src/client.rs
@@ -16,14 +16,14 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-//! Utilities to build a `TestClient` for `node-runtime`.
+//! Utilities to build a `TestClient` for `kitchensink-runtime`.
 
 use sc_service::client;
 use sp_runtime::BuildStorage;
 
 /// Re-export test-client utilities.
 pub use substrate_test_client::*;
 
-/// Call executor for `node-runtime` `TestClient`.
+/// Call executor for `kitchensink-runtime` `TestClient`.
 pub type ExecutorDispatch = sc_executor::NativeElseWasmExecutor<node_executor::ExecutorDispatch>;
 
 /// Default backend type.
@@ -34,10 +34,10 @@ pub type Client = client::Client<
 	Backend,
 	client::LocalCallExecutor<node_primitives::Block, Backend, ExecutorDispatch>,
 	node_primitives::Block,
-	node_runtime::RuntimeApi,
+	kitchensink_runtime::RuntimeApi,
 >;
 
-/// Transaction for node-runtime.
+/// Transaction for kitchensink-runtime.
 pub type Transaction = sc_client_api::backend::TransactionFor<Backend, node_primitives::Block>;
 
 /// Genesis configuration parameters for `TestClient`.
diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs
index fbd28c5af0298..1eb7318db52da 100644
--- a/bin/node/testing/src/genesis.rs
+++ b/bin/node/testing/src/genesis.rs
@@ -19,7 +19,7 @@
 //! Genesis Configuration.
 
 use crate::keyring::*;
-use node_runtime::{
+use kitchensink_runtime::{
 	constants::currency::*, wasm_binary_unwrap, AccountId, BabeConfig, BalancesConfig,
 	GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus,
 	StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG,
diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs
index 41dd28bb89988..88d9dc56b0532 100644
--- a/bin/node/testing/src/keyring.rs
+++ b/bin/node/testing/src/keyring.rs
@@ -19,8 +19,8 @@
 //! Test accounts.
 
 use codec::Encode;
+use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic};
 use node_primitives::{AccountId, Balance, Index};
-use node_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic};
 use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring};
 use sp_runtime::generic::Era;
diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs
index d85c6dbe9d048..271388549bcf9 100644
--- a/bin/utils/subkey/src/main.rs
+++ b/bin/utils/subkey/src/main.rs
@@ -16,7 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-//! Subkey utility, based on node_runtime.
+//! Subkey utility, based on kitchensink_runtime.
 
 fn main() -> Result<(), sc_cli::Error> {
 	subkey::run()
diff --git a/docs/README.adoc b/docs/README.adoc
index 0b82f0ed82a13..5d7b0b52c5250 100644
--- a/docs/README.adoc
+++ b/docs/README.adoc
@@ -445,7 +445,7 @@ pallet-assets, pallet-balances, pallet-consensus, pallet-contracts, pallet-counc
 frame-executive, pallet-session, pallet-staking, pallet-timestamp, pallet-treasury
 * Node
 [source, shell]
-node-cli, node-consensus, node-executor, node-network, node-primitives, node-runtime
+node-cli, node-consensus, node-executor, node-network, node-primitives, kitchensink-runtime
 * Subkey
 [source, shell]
 subkey
diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml
index 7a9f57e9a7734..ccf8338236d0a 100644
--- a/scripts/ci/gitlab/pipeline/test.yml
+++ b/scripts/ci/gitlab/pipeline/test.yml
@@ -181,13 +181,13 @@ test-deterministic-wasm:
   script:
     - rusty-cachier snapshot create
     # build runtime
-    - cargo build --verbose --release -p node-runtime
+    - cargo build --verbose --release -p kitchensink-runtime
     # make checksum
-    - sha256sum $CARGO_TARGET_DIR/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256
+    - sha256sum $CARGO_TARGET_DIR/release/wbuild/kitchensink-runtime/target/wasm32-unknown-unknown/release/kitchensink_runtime.wasm > checksum.sha256
     # clean up
     - rm -rf $CARGO_TARGET_DIR/release/wbuild
     # build again
-    - cargo build --verbose --release -p node-runtime
+    - cargo build --verbose --release -p kitchensink-runtime
     # confirm checksum
     - sha256sum -c ./checksum.sha256
     # clean up again, don't put release binaries into the cache
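The `test-deterministic-wasm` job above builds the runtime twice and compares checksums. The same assertion expressed in Rust, as a sketch (the file paths are placeholders and the `sha2` crate is a required dependency):

    use sha2::{Digest, Sha256};

    fn sha256_of(path: &str) -> std::io::Result<[u8; 32]> {
        // Hash the produced wasm blob, like `sha256sum` in the CI script.
        Ok(Sha256::digest(&std::fs::read(path)?).into())
    }

    fn main() -> std::io::Result<()> {
        let first = sha256_of("build-1/kitchensink_runtime.wasm")?;
        let second = sha256_of("build-2/kitchensink_runtime.wasm")?;
        // Two clean builds of the same source must produce identical bytes.
        assert_eq!(first, second, "runtime wasm build is not deterministic");
        Ok(())
    }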
diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml
index c5f3b1998fa97..5af7dd78a08e8 100644
--- a/utils/frame/generate-bags/node-runtime/Cargo.toml
+++ b/utils/frame/generate-bags/node-runtime/Cargo.toml
@@ -6,11 +6,11 @@ edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
-description = "Bag threshold generation script for pallet-bag-list and node-runtime."
+description = "Bag threshold generation script for pallet-bag-list and kitchensink-runtime."
 readme = "README.md"
 
 [dependencies]
-node-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" }
+kitchensink-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" }
 generate-bags = { version = "4.0.0-dev", path = "../" }
 
 # third-party
diff --git a/utils/frame/generate-bags/node-runtime/src/main.rs b/utils/frame/generate-bags/node-runtime/src/main.rs
index 12bcf8d28cf2b..5ea1262d95d34 100644
--- a/utils/frame/generate-bags/node-runtime/src/main.rs
+++ b/utils/frame/generate-bags/node-runtime/src/main.rs
@@ -43,5 +43,10 @@ struct Opt {
 fn main() -> Result<(), std::io::Error> {
 	let Opt { n_bags, output, total_issuance, minimum_balance } = Opt::parse();
 
-	generate_thresholds::<node_runtime::Runtime>(n_bags, &output, total_issuance, minimum_balance)
+	generate_thresholds::<kitchensink_runtime::Runtime>(
+		n_bags,
+		&output,
+		total_issuance,
+		minimum_balance,
+	)
 }
diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs
index 919290655368b..fc86a06170a50 100644
--- a/utils/wasm-builder/src/lib.rs
+++ b/utils/wasm-builder/src/lib.rs
@@ -92,7 +92,7 @@
 //!
 //! Each project can be skipped individually by using the environment variable
 //! `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the
-//! cargo project, e.g. `node-runtime` will be `NODE_RUNTIME`.
+//! cargo project, e.g. `kitchensink-runtime` will be `KITCHENSINK_RUNTIME`.
 //!
 //! ## Prerequisites:
 //!

From 9b0156931330532ff5d5284c6f84ffbad1af8010 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Tue, 2 Aug 2022 20:00:18 +0300
Subject: [PATCH 097/135] rpc: Update jsonrpsee v0.15.1 (#11939)

* Bump jsonrpsee to v0.15.1

Signed-off-by: Alexandru Vasile

* Update cargo.lock

Signed-off-by: Alexandru Vasile

* rpc-servers: Adjust RpcMiddleware to WS and HTTP traits

Signed-off-by: Alexandru Vasile

* rpc/author: Use `SubscriptionSink`

Signed-off-by: Alexandru Vasile

* rpc/chain: Use `SubscriptionSink`

Signed-off-by: Alexandru Vasile

* rpc/state: Use `SubscriptionSink`

Signed-off-by: Alexandru Vasile

* rpc/finality-grandpa: Use `SubscriptionSink`

Signed-off-by: Alexandru Vasile

* rpc/beefy: Use `SubscriptionSink`

Signed-off-by: Alexandru Vasile

* client: Extract RPC string result from queries

Signed-off-by: Alexandru Vasile

* Apply rust-fmt

Signed-off-by: Alexandru Vasile

* Fix warnings

Signed-off-by: Alexandru Vasile

* Fix testing

Signed-off-by: Alexandru Vasile

* rpc/tests: Remove trailing comma

Signed-off-by: Alexandru Vasile

* rpc: Use `SubscriptionResult` for implementations

Signed-off-by: Alexandru Vasile

* rpc: Remove comment

Signed-off-by: Alexandru Vasile

* rpc: Delegate middleware calls to `RpcMiddleware`

Signed-off-by: Alexandru Vasile

* rpc: Remove comment

Signed-off-by: Alexandru Vasile

* Revert Cargo.lock

Signed-off-by: Alexandru Vasile

* Update Cargo.lock with minimal changes

Signed-off-by: Alexandru Vasile

* rpc: Update imports for `SubscriptionResult`

Signed-off-by: Alexandru Vasile

* Apply cargo fmt

Signed-off-by: Alexandru Vasile

* rpc/tests: Submit raw json requests to validate DenyUnsafe

Signed-off-by: Alexandru Vasile
---
 Cargo.lock                                  | 52 ++++++------
 bin/node-template/node/Cargo.toml           |  2 +-
 bin/node/cli/Cargo.toml                     |  2 +-
 bin/node/rpc/Cargo.toml                     |  2 +-
 client/beefy/rpc/Cargo.toml                 |  2 +-
 client/beefy/rpc/src/lib.rs                 | 21 +++--
 client/consensus/babe/rpc/Cargo.toml        |  2 +-
 client/consensus/babe/rpc/src/lib.rs        |  4 +-
 client/consensus/manual-seal/Cargo.toml     |  2 +-
 client/finality-grandpa/rpc/Cargo.toml      |  2 +-
 client/finality-grandpa/rpc/src/lib.rs      | 20
++--- client/rpc-api/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 2 +- client/rpc-servers/src/middleware.rs | 79 +++++++++++++++---- client/rpc/Cargo.toml | 2 +- client/rpc/src/author/mod.rs | 17 ++-- client/rpc/src/chain/chain_full.rs | 14 ++-- client/rpc/src/chain/mod.rs | 23 +++--- client/rpc/src/dev/tests.rs | 19 +++-- client/rpc/src/state/mod.rs | 23 ++++-- client/rpc/src/state/state_full.rs | 18 ++--- client/service/Cargo.toml | 2 +- client/service/src/lib.rs | 5 +- client/sync-state-rpc/Cargo.toml | 2 +- frame/contracts/rpc/Cargo.toml | 2 +- frame/merkle-mountain-range/rpc/Cargo.toml | 2 +- frame/transaction-payment/rpc/Cargo.toml | 2 +- utils/frame/remote-externalities/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- utils/frame/rpc/support/Cargo.toml | 4 +- utils/frame/rpc/system/Cargo.toml | 2 +- utils/frame/try-runtime/cli/Cargo.toml | 2 +- 32 files changed, 201 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 733c6d73d2709..ec5c5267142a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2877,13 +2877,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 0.4.8", + "itoa 1.0.1", ] [[package]] @@ -3134,9 +3134,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e017217fcd18da0a25296d3693153dd19c8a6aadab330b3595285d075385d1" +checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-server", @@ -3149,9 +3149,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce395539a14d3ad4ec1256fde105abd36a2da25d578a291cabe98f45adfdb111" +checksum = "8752740ecd374bcbf8b69f3e80b0327942df76f793f8d4e60d3355650c31fb74" dependencies = [ "futures-util", "http", @@ -3170,9 +3170,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16efcd4477de857d4a2195a45769b2fe9ebb54f3ef5a4221d3b014a4fe33ec0b" +checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" dependencies = [ "anyhow", "arrayvec 0.7.2", @@ -3183,6 +3183,7 @@ dependencies = [ "futures-timer", "futures-util", "globset", + "http", "hyper", "jsonrpsee-types", "lazy_static", @@ -3195,14 +3196,15 @@ dependencies = [ "thiserror", "tokio", "tracing", + "tracing-futures", "unicase", ] [[package]] name = "jsonrpsee-http-server" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03" +checksum = "03802f0373a38c2420c70b5144742d800b509e2937edc4afb116434f07120117" dependencies = [ "futures-channel", "futures-util", @@ -3213,13 +3215,14 @@ dependencies = [ "serde_json", "tokio", "tracing", + "tracing-futures", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"874cf3f6a027cebf36cae767feca9aa2e8a8f799880e49eb5540819fcbd8eada" +checksum = "bd67957d4280217247588ac86614ead007b301ca2fa9f19c19f880a536f029e3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -3229,9 +3232,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcf76cd316f5d3ad48138085af1f45e2c58c98e02f0779783dbb034d43f7c86" +checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d" dependencies = [ "anyhow", "beef", @@ -3243,10 +3246,11 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee043cb5dd0d51d3eb93432e998d5bae797691a7b10ec4a325e036bcdb48c48a" +checksum = "6ee5feddd5188e62ac08fcf0e56478138e581509d4730f3f7be9b57dd402a4ff" dependencies = [ + "http", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -3254,12 +3258,13 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-server" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2e4d266774a671f8def3794255b28eddd09b18d76e0b913fa439f34588c0a" +checksum = "d488ba74fb369e5ab68926feb75a483458b88e768d44319f37e4ecad283c7325" dependencies = [ "futures-channel", "futures-util", + "http", "jsonrpsee-core", "jsonrpsee-types", "serde_json", @@ -3268,6 +3273,7 @@ dependencies = [ "tokio-stream", "tokio-util", "tracing", + "tracing-futures", ] [[package]] @@ -10839,9 +10845,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if 1.0.0", "log", @@ -10852,9 +10858,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index c8e74ea9515ac..eeba198da8212 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -42,7 +42,7 @@ frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 9dde225c0d6fb..fe7c5332e2b45 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -37,7 +37,7 @@ crate-type = ["cdylib", "rlib"] clap = { version = "3.1.18", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" 
}
 serde = { version = "1.0.136", features = ["derive"] }
-jsonrpsee = { version = "0.14.0", features = ["server"] }
+jsonrpsee = { version = "0.15.1", features = ["server"] }
 futures = "0.3.21"
 hex-literal = "0.3.4"
 log = "0.4.17"
diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml
index 547d0df7a8372..07b25085b9d10 100644
--- a/bin/node/rpc/Cargo.toml
+++ b/bin/node/rpc/Cargo.toml
@@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-jsonrpsee = { version = "0.14.0", features = ["server"] }
+jsonrpsee = { version = "0.15.1", features = ["server"] }
 node-primitives = { version = "2.0.0", path = "../primitives" }
 pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" }
 pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" }
diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml
index 7d31aea3f6971..46ee7640d710a 100644
--- a/client/beefy/rpc/Cargo.toml
+++ b/client/beefy/rpc/Cargo.toml
@@ -11,7 +11,7 @@ homepage = "https://substrate.io"
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
 futures = "0.3.21"
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 log = "0.4"
 parking_lot = "0.12.0"
 serde = { version = "1.0.136", features = ["derive"] }
diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs
index 13bca08d37429..91ff59324bd95 100644
--- a/client/beefy/rpc/src/lib.rs
+++ b/client/beefy/rpc/src/lib.rs
@@ -30,8 +30,8 @@
 use futures::{task::SpawnError, FutureExt, StreamExt};
 use jsonrpsee::{
 	core::{async_trait, Error as JsonRpseeError, RpcResult},
 	proc_macros::rpc,
-	types::{error::CallError, ErrorObject},
-	PendingSubscription,
+	types::{error::CallError, ErrorObject, SubscriptionResult},
+	SubscriptionSink,
 };
 use log::warn;
@@ -135,19 +135,18 @@ impl<Block> BeefyApiServer<notification::EncodedSignedCommitment, Block::Hash> f
 where
 	Block: BlockT,
 {
-	fn subscribe_justifications(&self, pending: PendingSubscription) {
+	fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult {
 		let stream = self
 			.signed_commitment_stream
 			.subscribe()
 			.map(|sc| notification::EncodedSignedCommitment::new::<Block>(sc));
 
 		let fut = async move {
-			if let Some(mut sink) = pending.accept() {
-				sink.pipe_from_stream(stream).await;
-			}
+			sink.pipe_from_stream(stream).await;
 		};
 
 		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
+		Ok(())
 	}
 
 	async fn latest_finalized(&self) -> RpcResult<Block::Hash> {
@@ -197,9 +196,9 @@ mod tests {
 		let (rpc, _) = setup_io_handler();
 		let request = r#"{"jsonrpc":"2.0","method":"beefy_getFinalizedHead","params":[],"id":1}"#;
 		let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"BEEFY RPC endpoint not ready"},"id":1}"#.to_string();
-		let (result, _) = rpc.raw_json_request(&request).await.unwrap();
+		let (response, _) = rpc.raw_json_request(&request).await.unwrap();
 
-		assert_eq!(expected_response, result,);
+		assert_eq!(expected_response, response.result);
 	}
 
 	#[tokio::test]
@@ -229,8 +228,8 @@
 		let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2);
 		while std::time::Instant::now() < deadline {
 			let (response, _) = io.raw_json_request(request).await.expect("RPC requests work");
-			if response != not_ready {
-				assert_eq!(response, expected);
+			if response.result != not_ready {
+				assert_eq!(response.result, expected);
 				// Success
 				return
 			}
@@ -260,7 +259,7 @@ mod tests {
 			.unwrap();
 
 		let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#;
-		assert_eq!(response, expected);
+		assert_eq!(response.result, expected);
 	}
 
 	fn create_commitment() -> BeefySignedCommitment<Block> {
diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml
index 4c9350735d529..488036277d7b6 100644
--- a/client/consensus/babe/rpc/Cargo.toml
+++ b/client/consensus/babe/rpc/Cargo.toml
@@ -13,7 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 futures = "0.3.21"
 serde = { version = "1.0.136", features = ["derive"] }
 thiserror = "1.0"
diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs
index af19d410346e3..b000d38a44f02 100644
--- a/client/consensus/babe/rpc/src/lib.rs
+++ b/client/consensus/babe/rpc/src/lib.rs
@@ -262,7 +262,7 @@ mod tests {
 		let (response, _) = api.raw_json_request(request).await.unwrap();
 		let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#;
 
-		assert_eq!(&response, expected);
+		assert_eq!(&response.result, expected);
 	}
 
 	#[tokio::test]
@@ -274,6 +274,6 @@ mod tests {
 		let (response, _) = api.raw_json_request(request).await.unwrap();
 		let expected = r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"RPC call is unsafe to be called externally"},"id":1}"#;
 
-		assert_eq!(&response, expected);
+		assert_eq!(&response.result, expected);
 	}
 }
diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml
index 7aa2232178f22..83c156ef5667f 100644
--- a/client/consensus/manual-seal/Cargo.toml
+++ b/client/consensus/manual-seal/Cargo.toml
@@ -13,7 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 assert_matches = "1.3.0"
 async-trait = "0.1.50"
 codec = { package = "parity-scale-codec", version = "3.0.0" }
diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml
index f2d37da058f45..075179d3ceaf7 100644
--- a/client/finality-grandpa/rpc/Cargo.toml
+++ b/client/finality-grandpa/rpc/Cargo.toml
@@ -12,7 +12,7 @@ homepage = "https://substrate.io"
 [dependencies]
 finality-grandpa = { version = "0.16.0", features = ["derive-codec"] }
 futures = "0.3.16"
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 log = "0.4.8"
 parity-scale-codec = { version = "3.0.0", features = ["derive"] }
 serde = { version = "1.0.105", features = ["derive"] }
diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs
index 1cf23a18c794b..85df72de77b54 100644
--- a/client/finality-grandpa/rpc/src/lib.rs
+++ b/client/finality-grandpa/rpc/src/lib.rs
@@ -26,7 +26,8 @@ use std::sync::Arc;
 use jsonrpsee::{
 	core::{async_trait, RpcResult},
 	proc_macros::rpc,
-	PendingSubscription,
+	types::SubscriptionResult,
+	SubscriptionSink,
 };
 
 mod error;
@@ -102,7 +103,7 @@ where
 		ReportedRoundStates::from(&self.authority_set, &self.voter_state).map_err(Into::into)
 	}
 
-	fn subscribe_justifications(&self, pending: PendingSubscription) {
+	fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult {
 		let stream = self.justification_stream.subscribe().map(
 			|x: sc_finality_grandpa::GrandpaJustification<Block>| {
 				JustificationNotification::from(x)
@@ -110,12 +111,11 @@ where
 		);
 
 		let fut = async move {
-			if let Some(mut sink) = pending.accept() {
-				sink.pipe_from_stream(stream).await;
-			}
+			sink.pipe_from_stream(stream).await;
 		};
 
 		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
+		Ok(())
 	}
 
 	async fn prove_finality(
@@ -283,9 +283,9 @@ mod tests {
 		let (rpc, _) = setup_io_handler(EmptyVoterState);
 		let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":0}"#.to_string();
 		let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#;
-		let (result, _) = rpc.raw_json_request(&request).await.unwrap();
+		let (response, _) = rpc.raw_json_request(&request).await.unwrap();
 
-		assert_eq!(expected_response, result,);
+		assert_eq!(expected_response, response.result);
 	}
 
 	#[tokio::test]
@@ -306,8 +306,8 @@ mod tests {
 		},\"id\":0}".to_string();
 
 		let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#;
-		let (result, _) = rpc.raw_json_request(&request).await.unwrap();
-		assert_eq!(expected_response, result);
+		let (response, _) = rpc.raw_json_request(&request).await.unwrap();
+		assert_eq!(expected_response, response.result);
 	}
 
 	#[tokio::test]
@@ -328,7 +328,7 @@ mod tests {
 		.unwrap();
 
 		let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#;
-		assert_eq!(response, expected);
+		assert_eq!(response.result, expected);
 	}
 
 	fn create_justification() -> GrandpaJustification<Block> {
diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml
index 3425ba2b245df..101d558663f9f 100644
--- a/client/rpc-api/Cargo.toml
+++ b/client/rpc-api/Cargo.toml
@@ -28,4 +28,4 @@ sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" }
 sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" }
 sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" }
 sp-version = { version = "5.0.0", path = "../../primitives/version" }
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml
index daaa955839045..8b40972527be8 100644
--- a/client/rpc-servers/Cargo.toml
+++ b/client/rpc-servers/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 futures = "0.3.21"
-jsonrpsee = { version = "0.14.0", features = ["server"] }
+jsonrpsee = { version = "0.15.1", features = ["server"] }
 log = "0.4.17"
 serde_json = "1.0.79"
 tokio = { version = "1.17.0", features = ["parking_lot"] }
diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs
index cb9918268f4de..1c0660fc3528d 100644
--- a/client/rpc-servers/src/middleware.rs
+++ b/client/rpc-servers/src/middleware.rs
@@ -18,11 +18,12 @@
 
 //! RPC middleware to collect prometheus metrics on RPC calls.
 
-use jsonrpsee::core::middleware::Middleware;
+use jsonrpsee::core::middleware::{Headers, HttpMiddleware, MethodKind, Params, WsMiddleware};
 use prometheus_endpoint::{
 	register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry,
 	U64,
 };
+use std::net::SocketAddr;
 
 /// Metrics for RPC middleware storing information about the number of requests started/completed,
 /// calls started/completed and their timings.
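The hunk below folds the old single `Middleware` impl into inherent methods and delegates to them from the new `WsMiddleware`/`HttpMiddleware` impls. The timing idea itself is simple; a self-contained sketch of just that pattern, with no jsonrpsee or prometheus types:

    // Stamp an Instant when a request starts, read the elapsed micros when
    // the call completes; method names follow the middleware above.
    use std::time::Instant;

    struct CallTimer;

    impl CallTimer {
        fn on_request(&self) -> Instant {
            Instant::now()
        }

        fn on_result(&self, name: &str, success: bool, started_at: Instant) {
            let micros = started_at.elapsed().as_micros();
            // A real implementation would feed `micros` into a histogram
            // labelled by transport and method name.
            println!("call={name} success={success} took={micros}us");
        }
    }

    fn main() {
        let timer = CallTimer;
        let started = timer.on_request();
        let _sum: u64 = (0..1_000).sum();
        timer.on_result("example_method", true, started);
    }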
@@ -134,30 +135,33 @@ impl RpcMiddleware {
 	pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self {
 		Self { metrics, transport_label }
 	}
-}
-
-impl Middleware for RpcMiddleware {
-	type Instant = std::time::Instant;
-
-	fn on_connect(&self) {
-		self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc());
-	}
 
-	fn on_request(&self) -> Self::Instant {
+	/// Called when a new JSON-RPC request comes to the server.
+	fn on_request(&self) -> std::time::Instant {
 		let now = std::time::Instant::now();
 		self.metrics.requests_started.with_label_values(&[self.transport_label]).inc();
 		now
 	}
 
-	fn on_call(&self, name: &str) {
-		log::trace!(target: "rpc_metrics", "[{}] on_call name={}", self.transport_label, name);
+	/// Called on each JSON-RPC method call, batch requests will trigger `on_call` multiple times.
+	fn on_call(&self, name: &str, params: Params, kind: MethodKind) {
+		log::trace!(
+			target: "rpc_metrics",
+			"[{}] on_call name={} params={:?} kind={}",
+			self.transport_label,
+			name,
+			params,
+			kind,
+		);
 		self.metrics
 			.calls_started
 			.with_label_values(&[self.transport_label, name])
 			.inc();
 	}
 
-	fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) {
+	/// Called on each JSON-RPC method completion, batch requests will trigger `on_result` multiple
+	/// times.
+	fn on_result(&self, name: &str, success: bool, started_at: std::time::Instant) {
 		let micros = started_at.elapsed().as_micros();
 		log::debug!(
 			target: "rpc_metrics",
@@ -183,12 +187,57 @@ impl Middleware for RpcMiddleware {
 			.inc();
 	}
 
-	fn on_response(&self, started_at: Self::Instant) {
+	/// Called once the JSON-RPC request is finished and response is sent to the output buffer.
+	fn on_response(&self, _result: &str, started_at: std::time::Instant) {
 		log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", self.transport_label, started_at);
 		self.metrics.requests_finished.with_label_values(&[self.transport_label]).inc();
 	}
+}
+
+impl WsMiddleware for RpcMiddleware {
+	type Instant = std::time::Instant;
+
+	fn on_connect(&self, _remote_addr: SocketAddr, _headers: &Headers) {
+		self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc());
+	}
+
+	fn on_request(&self) -> Self::Instant {
+		self.on_request()
+	}
+
+	fn on_call(&self, name: &str, params: Params, kind: MethodKind) {
+		self.on_call(name, params, kind)
+	}
+
+	fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) {
+		self.on_result(name, success, started_at)
+	}
+
+	fn on_response(&self, _result: &str, started_at: Self::Instant) {
+		self.on_response(_result, started_at)
+	}
 
-	fn on_disconnect(&self) {
+	fn on_disconnect(&self, _remote_addr: SocketAddr) {
 		self.metrics.ws_sessions_closed.as_ref().map(|counter| counter.inc());
 	}
 }
+
+impl HttpMiddleware for RpcMiddleware {
+	type Instant = std::time::Instant;
+
+	fn on_request(&self, _remote_addr: SocketAddr, _headers: &Headers) -> Self::Instant {
+		self.on_request()
+	}
+
+	fn on_call(&self, name: &str, params: Params, kind: MethodKind) {
+		self.on_call(name, params, kind)
+	}
+
+	fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) {
+		self.on_result(name, success, started_at)
+	}
+
+	fn on_response(&self, _result: &str, started_at: Self::Instant) {
+		self.on_response(_result, started_at)
+	}
+}
diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml
index e8c657f1a2949..5a05ae6e29df1 100644
--- a/client/rpc/Cargo.toml
+++ b/client/rpc/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.21"
 hash-db = { version = "0.15.2", default-features = false }
-jsonrpsee = { version = "0.14.0", features = ["server"] }
+jsonrpsee = { version = "0.15.1", features = ["server"] }
 lazy_static = { version = "1.4.0", optional = true }
 log = "0.4.17"
 parking_lot = "0.12.0"
diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs
index b8c4f5d582808..7d0ffdc62e080 100644
--- a/client/rpc/src/author/mod.rs
+++ b/client/rpc/src/author/mod.rs
@@ -29,7 +29,8 @@ use codec::{Decode, Encode};
 use futures::{FutureExt, TryFutureExt};
 use jsonrpsee::{
 	core::{async_trait, Error as JsonRpseeError, RpcResult},
-	PendingSubscription,
+	types::SubscriptionResult,
+	SubscriptionSink,
 };
 use sc_rpc_api::DenyUnsafe;
 use sc_transaction_pool_api::{
@@ -176,13 +177,13 @@ where
 			.collect())
 	}
 
-	fn watch_extrinsic(&self, pending: PendingSubscription, xt: Bytes) {
+	fn watch_extrinsic(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult {
 		let best_block_hash = self.client.info().best_hash;
 		let dxt = match TransactionFor::<P>::decode(&mut &xt[..]).map_err(|e| Error::from(e)) {
 			Ok(dxt) => dxt,
 			Err(e) => {
-				pending.reject(JsonRpseeError::from(e));
-				return
+				let _ = sink.reject(JsonRpseeError::from(e));
+				return Ok(())
 			},
 		};
 
@@ -199,19 +200,15 @@ where
 			let stream = match submit.await {
 				Ok(stream) => stream,
 				Err(err) => {
-					pending.reject(JsonRpseeError::from(err));
+					let _ = sink.reject(JsonRpseeError::from(err));
 					return
 				},
 			};
 
-			let mut sink = match pending.accept() {
-				Some(sink) => sink,
-				_ => return,
-			};
-
 			sink.pipe_from_stream(stream).await;
 		};
 
 		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
+		Ok(())
 	}
 }
diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs
index c00c6e5875d94..375e724a33d69 100644
--- a/client/rpc/src/chain/chain_full.rs
+++ b/client/rpc/src/chain/chain_full.rs
@@ -26,7 +26,7 @@ use futures::{
 	future::{self, FutureExt},
 	stream::{self, Stream, StreamExt},
 };
-use jsonrpsee::PendingSubscription;
+use jsonrpsee::SubscriptionSink;
 use sc_client_api::{BlockBackend, BlockchainEvents};
 use sp_blockchain::HeaderBackend;
 use sp_runtime::{
@@ -69,7 +69,7 @@ where
 		self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err)
 	}
 
-	fn subscribe_all_heads(&self, sink: PendingSubscription) {
+	fn subscribe_all_heads(&self, sink: SubscriptionSink) {
 		subscribe_headers(
 			&self.client,
 			&self.executor,
@@ -83,7 +83,7 @@ where
 		)
 	}
 
-	fn subscribe_new_heads(&self, sink: PendingSubscription) {
+	fn subscribe_new_heads(&self, sink: SubscriptionSink) {
 		subscribe_headers(
 			&self.client,
 			&self.executor,
@@ -98,7 +98,7 @@ where
 		)
 	}
 
-	fn subscribe_finalized_heads(&self, sink: PendingSubscription) {
+	fn subscribe_finalized_heads(&self, sink: SubscriptionSink) {
 		subscribe_headers(
 			&self.client,
 			&self.executor,
@@ -117,7 +117,7 @@ fn subscribe_headers(
 	client: &Arc<Client>,
 	executor: &SubscriptionTaskExecutor,
-	pending: PendingSubscription,
+	mut sink: SubscriptionSink,
 	best_block_hash: G,
 	stream: F,
 ) where
@@ -143,9 +143,7 @@ fn subscribe_headers(
 	let stream = stream::iter(maybe_header).chain(stream());
 
 	let fut = async move {
-		if let Some(mut sink) = pending.accept() {
-			sink.pipe_from_stream(stream).await;
-		}
+		sink.pipe_from_stream(stream).await;
 	};
 
 	executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs
index a300c80271fd1..be06b91ca747f 100644
--- a/client/rpc/src/chain/mod.rs
+++ b/client/rpc/src/chain/mod.rs
@@ -27,7 +27,7 @@ use std::sync::Arc;
 
 use crate::SubscriptionTaskExecutor;
 
-use jsonrpsee::{core::RpcResult, PendingSubscription};
+use jsonrpsee::{core::RpcResult, types::SubscriptionResult, SubscriptionSink};
 use sc_client_api::BlockchainEvents;
 use sp_rpc::{list::ListOrValue, number::NumberOrHex};
 use sp_runtime::{
@@ -95,13 +95,13 @@ where
 	}
 
 	/// All new head subscription
-	fn subscribe_all_heads(&self, sink: PendingSubscription);
+	fn subscribe_all_heads(&self, sink: SubscriptionSink);
 
 	/// New best head subscription
-	fn subscribe_new_heads(&self, sink: PendingSubscription);
+	fn subscribe_new_heads(&self, sink: SubscriptionSink);
 
 	/// Finalized head subscription
-	fn subscribe_finalized_heads(&self, sink: PendingSubscription);
+	fn subscribe_finalized_heads(&self, sink: SubscriptionSink);
 }
 
 /// Create new state API that works on full node.
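All the subscription endpoints in this patch now follow the same shape: take the `SubscriptionSink` directly, reject it for bad input, otherwise pipe a stream into it, and return `Ok(())` either way. A minimal mock of that control flow (toy types standing in for jsonrpsee's, synchronous for brevity):

    // MockSink stands in for jsonrpsee's SubscriptionSink; only the control
    // flow is the point: reject early, otherwise pipe, and return Ok.
    struct MockSink;

    impl MockSink {
        fn reject(&mut self, err: &str) -> bool {
            eprintln!("subscription rejected: {err}");
            true
        }

        fn pipe(&mut self, items: impl Iterator<Item = u32>) {
            for item in items {
                println!("notify subscriber: {item}");
            }
        }
    }

    type SubscriptionResult = Result<(), String>;

    fn subscribe(mut sink: MockSink, input: Option<Vec<u32>>) -> SubscriptionResult {
        let items = match input {
            Some(items) => items,
            None => {
                // Mirrors `let _ = sink.reject(err); return Ok(())` above:
                // a rejected subscription is not a method error.
                let _ = sink.reject("bad params");
                return Ok(())
            },
        };
        sink.pipe(items.into_iter());
        Ok(())
    }

    fn main() {
        assert!(subscribe(MockSink, Some(vec![1, 2, 3])).is_ok());
        assert!(subscribe(MockSink, None).is_ok());
    }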
@@ -160,16 +160,19 @@ where
 		self.backend.finalized_head().map_err(Into::into)
 	}
 
-	fn subscribe_all_heads(&self, sink: PendingSubscription) {
-		self.backend.subscribe_all_heads(sink)
+	fn subscribe_all_heads(&self, sink: SubscriptionSink) -> SubscriptionResult {
+		self.backend.subscribe_all_heads(sink);
+		Ok(())
 	}
 
-	fn subscribe_new_heads(&self, sink: PendingSubscription) {
-		self.backend.subscribe_new_heads(sink)
+	fn subscribe_new_heads(&self, sink: SubscriptionSink) -> SubscriptionResult {
+		self.backend.subscribe_new_heads(sink);
+		Ok(())
 	}
 
-	fn subscribe_finalized_heads(&self, sink: PendingSubscription) {
-		self.backend.subscribe_finalized_heads(sink)
+	fn subscribe_finalized_heads(&self, sink: SubscriptionSink) -> SubscriptionResult {
+		self.backend.subscribe_finalized_heads(sink);
+		Ok(())
 	}
 }
diff --git a/client/rpc/src/dev/tests.rs b/client/rpc/src/dev/tests.rs
index b7a0de8f5ae0b..f3b18690d0972 100644
--- a/client/rpc/src/dev/tests.rs
+++ b/client/rpc/src/dev/tests.rs
@@ -17,8 +17,6 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use super::*;
-use assert_matches::assert_matches;
-use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError};
 use sc_block_builder::BlockBuilderProvider;
 use sp_blockchain::HeaderBackend;
 use sp_consensus::BlockOrigin;
@@ -61,9 +59,18 @@ async fn deny_unsafe_works() {
 	let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
 	client.import(BlockOrigin::Own, block).await.unwrap();
 
-	assert_matches!(
-		api.call::<_, Option<BlockStats>>("dev_getBlockStats", [client.info().best_hash])
-			.await,
-		Err(JsonRpseeError::Call(CallError::Custom(err))) if err.message().contains("RPC call is unsafe to be called externally")
+	let best_hash = client.info().best_hash;
+	let best_hash_param =
+		serde_json::to_string(&best_hash).expect("To string must always succeed for block hashes");
+
+	let request = format!(
+		"{{\"jsonrpc\":\"2.0\",\"method\":\"dev_getBlockStats\",\"params\":[{}],\"id\":1}}",
+		best_hash_param
+	);
+	let (resp, _) = api.raw_json_request(&request).await.expect("Raw calls should succeed");
+
+	assert_eq!(
+		resp.result,
+		r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"RPC call is unsafe to be called externally"},"id":1}"#
 	);
 }
diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs
index 232be4edc8aab..7213e4360ae2b 100644
--- a/client/rpc/src/state/mod.rs
+++ b/client/rpc/src/state/mod.rs
@@ -29,7 +29,8 @@ use crate::SubscriptionTaskExecutor;
 
 use jsonrpsee::{
 	core::{Error as JsonRpseeError, RpcResult},
-	ws_server::PendingSubscription,
+	types::SubscriptionResult,
+	ws_server::SubscriptionSink,
 };
 use sc_rpc_api::{state::ReadProof, DenyUnsafe};
@@ -155,10 +156,10 @@ where
 	) -> Result;
 
 	/// New runtime version subscription
-	fn subscribe_runtime_version(&self, sink: PendingSubscription);
+	fn subscribe_runtime_version(&self, sink: SubscriptionSink);
 
 	/// New storage subscription
-	fn subscribe_storage(&self, sink: PendingSubscription, keys: Option<Vec<StorageKey>>);
+	fn subscribe_storage(&self, sink: SubscriptionSink, keys: Option<Vec<StorageKey>>);
 }
 
 /// Create new state API that works on full node.
@@ -318,19 +319,25 @@ where
 			.map_err(Into::into)
 	}
 
-	fn subscribe_runtime_version(&self, sink: PendingSubscription) {
-		self.backend.subscribe_runtime_version(sink)
+	fn subscribe_runtime_version(&self, sink: SubscriptionSink) -> SubscriptionResult {
+		self.backend.subscribe_runtime_version(sink);
+		Ok(())
 	}
 
-	fn subscribe_storage(&self, sink: PendingSubscription, keys: Option<Vec<StorageKey>>) {
+	fn subscribe_storage(
+		&self,
+		mut sink: SubscriptionSink,
+		keys: Option<Vec<StorageKey>>,
+	) -> SubscriptionResult {
 		if keys.is_none() {
 			if let Err(err) = self.deny_unsafe.check_if_safe() {
 				let _ = sink.reject(JsonRpseeError::from(err));
-				return
+				return Ok(())
 			}
 		}
 
-		self.backend.subscribe_storage(sink, keys)
+		self.backend.subscribe_storage(sink, keys);
+		Ok(())
 	}
 }
diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs
index 5a8a83cdd4851..42ba70b0af7e7 100644
--- a/client/rpc/src/state/state_full.rs
+++ b/client/rpc/src/state/state_full.rs
@@ -28,7 +28,7 @@ use super::{
 use crate::SubscriptionTaskExecutor;
 
 use futures::{future, stream, FutureExt, StreamExt};
-use jsonrpsee::{core::Error as JsonRpseeError, PendingSubscription};
+use jsonrpsee::{core::Error as JsonRpseeError, SubscriptionSink};
 use sc_client_api::{
 	Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider,
 	StorageProvider,
@@ -357,7 +357,7 @@ where
 			.map_err(client_err)
 	}
 
-	fn subscribe_runtime_version(&self, pending: PendingSubscription) {
+	fn subscribe_runtime_version(&self, mut sink: SubscriptionSink) {
 		let client = self.client.clone();
 
 		let initial = match self
@@ -369,7 +369,7 @@ where
 		{
 			Ok(initial) => initial,
 			Err(e) => {
-				pending.reject(JsonRpseeError::from(e));
+				let _ = sink.reject(JsonRpseeError::from(e));
 				return
 			},
 		};
@@ -397,19 +397,17 @@ where
 		let stream = futures::stream::once(future::ready(initial)).chain(version_stream);
 
 		let fut = async move {
-			if let Some(mut sink) = pending.accept() {
-				sink.pipe_from_stream(stream).await;
-			}
+			sink.pipe_from_stream(stream).await;
 		};
 
 		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
 	}
 
-	fn subscribe_storage(&self, pending: PendingSubscription, keys: Option<Vec<StorageKey>>) {
+	fn subscribe_storage(&self, mut sink: SubscriptionSink, keys: Option<Vec<StorageKey>>) {
 		let stream = match self.client.storage_changes_notification_stream(keys.as_deref(), None) {
 			Ok(stream) => stream,
 			Err(blockchain_err) => {
-				pending.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err))));
+				let _ = sink.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err))));
 				return
 			},
 		};
@@ -442,9 +440,7 @@ where
 			.filter(|storage| future::ready(!storage.changes.is_empty()));
 
 		let fut = async move {
-			if let Some(mut sink) = pending.accept() {
-				sink.pipe_from_stream(stream).await;
-			}
+			sink.pipe_from_stream(stream).await;
 		};
 
 		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml
index 31b0c860cf190..e8ddf40a0ae03 100644
--- a/client/service/Cargo.toml
+++ b/client/service/Cargo.toml
@@ -22,7 +22,7 @@ wasmtime = ["sc-executor/wasmtime"]
 test-helpers = []
 
 [dependencies]
-jsonrpsee = { version = "0.14.0", features = ["server"] }
+jsonrpsee = { version = "0.15.1", features = ["server"] }
 thiserror = "1.0.30"
 futures = "0.3.21"
 rand = "0.7.3"
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs
index 2e7b611ffb187..98bcb17174157 100644
--- a/client/service/src/lib.rs
+++ b/client/service/src/lib.rs
@@ -101,7 +101,10 @@ impl RpcHandlers {
 		&self,
 		json_query: &str,
 	) -> Result<(String, mpsc::UnboundedReceiver<String>), JsonRpseeError> {
-		self.0.raw_json_request(json_query).await
+		self.0
+			.raw_json_request(json_query)
+			.await
+			.map(|(method_res, recv)| (method_res.result, recv))
 	}
 
 	/// Provides access to the underlying `RpcModule`
diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml
index bd9194092fa3d..d02637fcf884b 100644
--- a/client/sync-state-rpc/Cargo.toml
+++ b/client/sync-state-rpc/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 serde = { version = "1.0.136", features = ["derive"] }
 serde_json = "1.0.79"
 thiserror = "1.0.30"
diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml
index a81abef9f37ca..7876c7cba40d0 100644
--- a/frame/contracts/rpc/Cargo.toml
+++ b/frame/contracts/rpc/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 serde = { version = "1", features = ["derive"] }
 
 # Substrate Dependencies
diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml
index 45cb975df277b..c7d9662904747 100644
--- a/frame/merkle-mountain-range/rpc/Cargo.toml
+++ b/frame/merkle-mountain-range/rpc/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 serde = { version = "1.0.136", features = ["derive"] }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml
index 31e0972a0d5b5..16c2cc55efefb 100644
--- a/frame/transaction-payment/rpc/Cargo.toml
+++ b/frame/transaction-payment/rpc/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["server", "macros"] }
 pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml
index a77d090246421..3121157df68d8 100644
--- a/utils/frame/remote-externalities/Cargo.toml
+++ b/utils/frame/remote-externalities/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 env_logger = "0.9"
-jsonrpsee = { version = "0.14.0", features = ["ws-client", "macros"] }
+jsonrpsee = { version = "0.15.1", features = ["ws-client", "macros"] }
 log = "0.4.17"
 serde = "1.0.136"
 serde_json = "1.0"
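The `raw_json_request` change above means callers now receive a structured response and read its `result` field, which in these tests is the raw JSON-RPC envelope. For reference, picking such an envelope apart with the real `serde_json` crate (the envelope string is copied from the tests; the parsing code is illustrative):

    use serde_json::Value;

    fn main() {
        let raw = r#"{"jsonrpc":"2.0","result":false,"id":1}"#;
        let parsed: Value = serde_json::from_str(raw).expect("valid JSON-RPC envelope");
        // JSON-RPC 2.0 envelope fields: version tag, payload, request id.
        assert_eq!(parsed["jsonrpc"], "2.0");
        assert_eq!(parsed["result"], false);
        assert_eq!(parsed["id"], 1);
    }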
b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 6c6bc5cf327b5..00fdc87a506e8 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -25,7 +25,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = { version = "0.23.1" } -jsonrpsee = { version = "0.14.0", features = ["server", "macros"] } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } # Substrate Dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 7ea07534e1bdb..2104774bd2605 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpsee = { version = "0.14.0", features = ["jsonrpsee-types"] } +jsonrpsee = { version = "0.15.1", features = ["jsonrpsee-types"] } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } @@ -25,6 +25,6 @@ sp-storage = { version = "6.0.0", path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.1.1" -jsonrpsee = { version = "0.14.0", features = ["ws-client", "jsonrpsee-types"] } +jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.17.0" frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index f76944a0fec40..5d8984e8d399b 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde_json = "1" codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } futures = "0.3.21" log = "0.4.17" frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../../frame/system/rpc/runtime-api" } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index b82da0f1222a7..4b4f9bdb2809a 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -19,7 +19,7 @@ parity-scale-codec = "3.0.0" serde = "1.0.136" zstd = { version = "0.11.2", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } -jsonrpsee = { version = "0.14.0", default-features = false, features = ["ws-client"] } +jsonrpsee = { version = "0.15.1", default-features = false, features = ["ws-client"] } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } From 862dbbc1c16a17b33e0e619b0e996b608a1c5069 Mon Sep 17 00:00:00 2001 From: Andronik Date: Tue, 2 Aug 2022 20:49:03 +0200 Subject: [PATCH 098/135] offences: make fn slash_fraction non-static (#11956) * offences: make fn slash_fraction non-static * Bastifmt (inline variable) --- frame/babe/src/equivocation.rs | 4 ++-- frame/grandpa/src/equivocation.rs | 4 ++-- frame/im-online/src/lib.rs | 6 +++--- 
frame/im-online/src/tests.rs | 10 ++++++---- frame/offences/benchmarking/src/lib.rs | 6 ++---- frame/offences/src/lib.rs | 3 +-- frame/offences/src/mock.rs | 4 ++-- primitives/staking/src/offence.rs | 7 +++---- 8 files changed, 21 insertions(+), 23 deletions(-) diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index df46f3544b389..f55bda751887d 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -284,9 +284,9 @@ impl Offence self.slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 804272c20480f..181d22fba545c 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -353,9 +353,9 @@ impl Offence self.time_slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index f190f6672f309..34c1c70d79f75 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -958,12 +958,12 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). - if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational(3 * threshold, validator_set_count); + if let Some(threshold) = offenders.checked_sub(self.validator_set_count / 10 + 1) { + let x = Perbill::from_rational(3 * threshold, self.validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 288081556a085..05e1af169dba9 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -36,22 +36,24 @@ use sp_runtime::{ #[test] fn test_unresponsiveness_slash_fraction() { + let dummy_offence = + UnresponsivenessOffence { session_index: 0, validator_set_count: 50, offenders: vec![()] }; // A single case of unresponsiveness is not slashed. - assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero()); + assert_eq!(dummy_offence.slash_fraction(1), Perbill::zero()); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(5, 50), + dummy_offence.slash_fraction(5), Perbill::zero(), // 0% ); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(7, 50), + dummy_offence.slash_fraction(7), Perbill::from_parts(4200000), // 0.42% ); // One third offline should be punished around 5%. 
assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(17, 50), + dummy_offence.slash_fraction(17), Perbill::from_parts(46200000), // 4.62% ); } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 98c6390964d82..b793bd8d2699a 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -288,15 +288,13 @@ benchmarks! { let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; - - let slash_fraction = UnresponsivenessOffence::::slash_fraction( - offenders.len() as u32, validator_set_count, - ); + let offenders_count = offenders.len() as u32; let offence = UnresponsivenessOffence { session_index: 0, validator_set_count, offenders, }; + let slash_fraction = offence.slash_fraction(offenders_count); assert_eq!(System::::event_count(), 0); }: { let _ = ::ReportUnresponsiveness::report_offence( diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index e4b75d9c3c015..ae454d6b06740 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -120,7 +120,6 @@ where fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { let offenders = offence.offenders(); let time_slot = offence.time_slot(); - let validator_set_count = offence.validator_set_count(); // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. @@ -134,7 +133,7 @@ where let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed - let new_fraction = O::slash_fraction(offenders_count, validator_set_count); + let new_fraction = offence.slash_fraction(offenders_count); let slash_perbill: Vec<_> = (0..concurrent_offenders.len()).map(|_| new_fraction).collect(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 6a69b54b3cca0..d9ecf44ad8734 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -168,8 +168,8 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { - Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) + fn slash_fraction(&self, offenders_count: u32) -> Perbill { + Perbill::from_percent(5 + offenders_count * 100 / self.validator_set_count) } } diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 4261063993a52..f6517b9e9028b 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -108,11 +108,10 @@ pub trait Offence { } /// A slash fraction of the total exposure that should be slashed for this - /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. + /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// - /// `offenders_count` - the count of unique offending authorities. It is >0. - /// `validator_set_count` - the cardinality of the validator set at the time of offence. - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; + /// `offenders_count` - the count of unique offending authorities for this `TimeSlot`. It is >0. + fn slash_fraction(&self, offenders_count: u32) -> Perbill; } /// Errors that may happen on offence reports. 
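The core of the change in patch 098 is that `Offence::slash_fraction` becomes an instance method: each offence type now reads `validator_set_count` from its own fields instead of receiving it as a second argument. Below is a minimal standalone sketch of the new shape, with `Perbill` approximated by `f64` and the trait trimmed to the one method. The real types live in `sp_runtime`/`sp_staking`; this is an illustration, not the actual Substrate code.

// Standalone sketch of the post-patch `slash_fraction(&self, ...)` shape.
// `Perbill` is modeled as `f64` and the `Offence` trait is trimmed to one
// method; names mirror the real pallets but this is not the Substrate code.

trait Offence {
    // After this patch: takes `&self`, so `validator_set_count` no longer
    // needs to be threaded through as an extra argument.
    fn slash_fraction(&self, offenders_count: u32) -> f64;
}

struct UnresponsivenessOffence {
    validator_set_count: u32,
}

impl Offence for UnresponsivenessOffence {
    fn slash_fraction(&self, offenders_count: u32) -> f64 {
        // min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07, as in pallet-im-online:
        // up to ~10% of validators can be offline with no slash, then the
        // fraction climbs linearly, capped at 7%.
        let n = self.validator_set_count;
        match offenders_count.checked_sub(n / 10 + 1) {
            Some(threshold) => {
                let x = ((3 * threshold) as f64 / n as f64).min(1.0);
                x * 0.07
            },
            None => 0.0,
        }
    }
}

struct EquivocationOffence {
    validator_set_count: u32,
}

impl Offence for EquivocationOffence {
    fn slash_fraction(&self, offenders_count: u32) -> f64 {
        // min((3k / n)^2, 1), as in the babe/grandpa equivocation offences.
        // The `.min(1.0)` stands in for Perbill saturating at 100%.
        let x = ((3 * offenders_count) as f64 / self.validator_set_count as f64).min(1.0);
        x * x
    }
}

fn main() {
    let offence = UnresponsivenessOffence { validator_set_count: 50 };
    // Mirrors the values asserted in frame/im-online/src/tests.rs above:
    assert_eq!(offence.slash_fraction(1), 0.0);                        // 0%
    assert_eq!(offence.slash_fraction(5), 0.0);                        // 0%
    assert!((offence.slash_fraction(7) - 0.0042).abs() < 1e-12);       // 0.42%
    assert!((offence.slash_fraction(17) - 0.0462).abs() < 1e-12);      // 4.62%
}

One consequence of the new signature is visible in `frame/offences/benchmarking/src/lib.rs` above: where the old static function had to be called with both counts before the offence existed, the benchmark now constructs the `UnresponsivenessOffence` first and then calls `offence.slash_fraction(offenders_count)` on it.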
From b27c470eaff379f512d1dec052aff5d551ed3b03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 3 Aug 2022 10:46:10 +0200 Subject: [PATCH 099/135] Revert non-best block (#11716) * Revert non-best block This makes `revert` also revert non-best blocks. * Update client/db/src/lib.rs * Do not count leaves against the maximum number to revert * Add some explanation * Fix bug * Apply suggestions from code review Co-authored-by: Davide Galassi Co-authored-by: Davide Galassi --- client/api/src/backend.rs | 3 +- client/api/src/leaves.rs | 5 + client/db/src/lib.rs | 149 +++++++++++++++---- client/service/src/chain_ops/revert_chain.rs | 7 + 4 files changed, 135 insertions(+), 29 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index af8552886b72e..54784a2f27b64 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -507,7 +507,8 @@ pub trait Backend: AuxStore + Send + Sync { /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an - /// inconsistent state. + /// inconsistent state. All blocks higher than the best block are also reverted and not counting + /// towards `n`. /// /// Returns the number of blocks that were successfully reverted and the list of finalized /// blocks that has been reverted. diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 2e5d4be3a5462..26eda46e6f76f 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -259,6 +259,11 @@ where removed } + + /// Returns the highest leaf and all hashes associated to it. + pub fn highest_leaf(&self) -> Option<(N, &[H])> { + self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) + } } /// Helper for undoing operations. diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7dd49f9831f1c..e14d3a26aa557 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2057,36 +2057,46 @@ impl sc_client_api::backend::Backend for Backend { ) -> ClientResult<(NumberFor, HashSet)> { let mut reverted_finalized = HashSet::new(); - let mut best_number = self.blockchain.info().best_number; - let mut best_hash = self.blockchain.info().best_hash; + let info = self.blockchain.info(); - let finalized = self.blockchain.info().finalized_number; + let highest_leaf = self + .blockchain + .leaves + .read() + .highest_leaf() + .and_then(|(n, h)| h.last().map(|h| (n, *h))); + + let best_number = info.best_number; + let best_hash = info.best_hash; + + let finalized = info.finalized_number; let revertible = best_number - finalized; let n = if !revert_finalized && revertible < n { revertible } else { n }; + let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf { + Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h), + None => (n, best_number, best_hash), + }; + let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { - if best_number.is_zero() { + if number_to_revert.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed = - self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { + self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block hash not found.", - best_number + "Error reverting to {}. 
Block header not found.", + hash_to_revert, )) })?; let removed_hash = removed.hash(); - let prev_number = best_number.saturating_sub(One::one()); - let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block hash not found.", - best_number - )) - })?; + let prev_number = number_to_revert.saturating_sub(One::one()); + let prev_hash = + if prev_number == best_number { best_hash } else { *removed.parent_hash() }; if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) @@ -2096,12 +2106,15 @@ impl sc_client_api::backend::Backend for Backend { Some(commit) => { apply_state_commit(&mut transaction, commit); - best_number = prev_number; - best_hash = prev_hash; + number_to_revert = prev_number; + hash_to_revert = prev_hash; - let update_finalized = best_number < finalized; + let update_finalized = number_to_revert < finalized; - let key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; + let key = utils::number_and_hash_to_lookup_key( + number_to_revert, + &hash_to_revert, + )?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2111,12 +2124,14 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == best_hash { - if !best_number.is_zero() && - self.have_state_at(&prev_hash, best_number - One::one()) - { + if hash == hash_to_revert { + if !number_to_revert.is_zero() && + self.have_state_at( + &prev_hash, + number_to_revert - One::one(), + ) { let lookup_key = utils::number_and_hash_to_lookup_key( - best_number - One::one(), + number_to_revert - One::one(), prev_hash, )?; transaction.set_from_vec( @@ -2137,13 +2152,16 @@ impl sc_client_api::backend::Backend for Backend { &mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, - best_hash, + hash_to_revert, ); self.storage.db.commit(transaction)?; + + let is_best = number_to_revert < best_number; + self.blockchain.update_meta(MetaUpdate { - hash: best_hash, - number: best_number, - is_best: true, + hash: hash_to_revert, + number: number_to_revert, + is_best, is_finalized: update_finalized, with_state: false, }); @@ -2161,7 +2179,7 @@ impl sc_client_api::backend::Backend for Backend { let mut transaction = Transaction::new(); let mut leaves = self.blockchain.leaves.write(); - leaves.revert(best_hash, best_number); + leaves.revert(hash_to_revert, number_to_revert); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -3463,4 +3481,79 @@ pub(crate) mod tests { insert_header_no_head(&backend, 1, block0, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } + + #[test] + fn revert_non_best_blocks() { + let backend = Backend::::new_test(10, 10); + + let genesis = + insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) + .unwrap(); + + let block1 = + insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); + + let block2 = + insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap(); + + let block3 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + let header = Header { + number: 3, + parent_hash: block2, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: 
Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block4 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + let header = Header { + number: 4, + parent_hash: block3, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block3_fork = insert_header_no_head(&backend, 3, block2, Default::default()); + + assert!(backend.have_state_at(&block1, 1)); + assert!(backend.have_state_at(&block2, 2)); + assert!(backend.have_state_at(&block3, 3)); + assert!(backend.have_state_at(&block4, 4)); + assert!(backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); + assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + + assert_eq!(3, backend.revert(1, false).unwrap().0); + + assert!(backend.have_state_at(&block1, 1)); + assert!(!backend.have_state_at(&block2, 2)); + assert!(!backend.have_state_at(&block3, 3)); + assert!(!backend.have_state_at(&block4, 4)); + assert!(!backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); + assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + } } diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index 9a3ce6024ed92..3ee4399d063b3 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -40,6 +40,13 @@ where info!("There aren't any non-finalized blocks to revert."); } else { info!("Reverted {} blocks. Best: #{} ({})", reverted.0, info.best_number, info.best_hash); + + if reverted.0 > blocks { + info!( + "Number of reverted blocks is higher than requested \ + because of reverted leaves higher than the best block." 
+ ) + } } Ok(()) } From 28ac0a8591c3b9765206cada54980e9ea3416ea0 Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Wed, 3 Aug 2022 12:34:33 +0100 Subject: [PATCH 100/135] Fix docs urls (#11966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix docs urls * Update bin/node-template/README.md * Update frame/aura/src/lib.rs * Run cargo +nightly fmt Co-authored-by: Bastian Köcher --- README.md | 2 +- bin/node-template/README.md | 32 ++++++++----------- bin/node-template/docs/rust-setup.md | 6 ++-- bin/node-template/pallets/template/src/lib.rs | 10 +++--- client/rpc-api/src/state/mod.rs | 2 +- frame/examples/basic/src/benchmarking.rs | 2 +- frame/examples/basic/src/lib.rs | 2 +- frame/remark/src/tests.rs | 3 ++ frame/sudo/README.md | 2 +- frame/sudo/src/lib.rs | 2 +- .../procedural/src/pallet/expand/error.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- .../procedural/src/pallet/expand/mod.rs | 2 +- .../src/pallet/expand/pallet_struct.rs | 2 +- frame/support/src/dispatch.rs | 2 +- frame/support/src/traits/misc.rs | 2 +- primitives/runtime/src/bounded/bounded_vec.rs | 2 +- .../runtime/src/bounded/weak_bounded_vec.rs | 2 +- 18 files changed, 39 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index b716794428a00..c609641af7ce2 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Substrate is a next-generation framework for blockchain innovation 🚀. ## Trying it out Simply go to [docs.substrate.io](https://docs.substrate.io) and follow the -[installation](https://docs.substrate.io/v3/getting-started/overview) instructions. You can +[installation](https://docs.substrate.io/main-docs/install/) instructions. You can also try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Contributions & Code of Conduct diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 8defb870fa1b0..0f6fd9450aeee 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -114,7 +114,7 @@ local node template. ### Multi-Node Local Testnet If you want to see the multi-node consensus algorithm in action, refer to our -[Start a Private Network tutorial](https://docs.substrate.io/tutorials/v3/private-network). +[Simulate a network tutorial](https://docs.substrate.io/tutorials/get-started/simulate-network/). ## Template Structure @@ -129,7 +129,7 @@ Substrate-based blockchain nodes expose a number of capabilities: - Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the nodes in the network to communicate with one another. - Consensus: Blockchains must have a way to come to - [consensus](https://docs.substrate.io/v3/advanced/consensus) on the state of the + [consensus](https://docs.substrate.io/main-docs/fundamentals/consensus/) on the state of the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). @@ -138,22 +138,20 @@ Substrate-based blockchain nodes expose a number of capabilities: There are several files in the `node` directory - take special note of the following: - [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://docs.substrate.io/v3/runtime/chain-specs) is a + [chain specification](https://docs.substrate.io/main-docs/build/chain-spec/) is a source code file that defines a Substrate chain's initial (genesis) state. 
Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. Take note of the `development_config` and `testnet_genesis` functions, which are used to define the genesis state for the local development chain configuration. These functions identify some - [well-known accounts](https://docs.substrate.io/v3/tools/subkey#well-known-keys) + [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and use them to configure the blockchain's initial state. - [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of the libraries that this file imports and the names of the functions it invokes. In particular, there are references to consensus-related topics, such as the - [longest chain rule](https://docs.substrate.io/v3/advanced/consensus#longest-chain-rule), - the [Aura](https://docs.substrate.io/v3/advanced/consensus#aura) block authoring - mechanism and the - [GRANDPA](https://docs.substrate.io/v3/advanced/consensus#grandpa) finality - gadget. + [block finalization and forks](https://docs.substrate.io/main-docs/fundamentals/consensus/#finalization-and-forks) + and other [consensus mechanisms](https://docs.substrate.io/main-docs/fundamentals/consensus/#default-consensus-models) + such as Aura for block authoring and GRANDPA for finality. After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -165,16 +163,15 @@ capabilities and configuration parameters that it exposes: ### Runtime In Substrate, the terms -"[runtime](https://docs.substrate.io/v3/getting-started/glossary#runtime)" and -"[state transition function](https://docs.substrate.io/v3/getting-started/glossary#state-transition-function-stf)" +"runtime" and "state transition function" are analogous - they refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. The Substrate project in this repository uses -the [FRAME](https://docs.substrate.io/v3/runtime/frame) framework to construct a +[FRAME](https://docs.substrate.io/main-docs/fundamentals/runtime-intro/#frame) to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful -[macro language](https://docs.substrate.io/v3/runtime/macros) that makes it easy to +[macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to create pallets and flexibly compose them to create blockchains that can address -[a variety of needs](https://www.substrate.io/substrate-users/). +[a variety of needs](https://substrate.io/ecosystem/projects/). Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: @@ -184,8 +181,7 @@ the following: - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core - [FRAME Support](https://docs.substrate.io/v3/runtime/frame#support-crate) - library. + FRAME Support [system](https://docs.substrate.io/reference/frame-pallets/#system-pallets) library. 
### Pallets @@ -196,12 +192,12 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is compromised of a number of blockchain primitives: - Storage: FRAME defines a rich set of powerful - [storage abstractions](https://docs.substrate.io/v3/runtime/storage) that makes + [storage abstractions](https://docs.substrate.io/main-docs/build/runtime-storage/) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain. - Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state. -- Events: Substrate uses [events and errors](https://docs.substrate.io/v3/runtime/events-and-errors) +- Events: Substrate uses [events and errors](https://docs.substrate.io/main-docs/build/events-errors/) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. - Config: The `Config` configuration interface is used to define the types and parameters upon diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index ea133ca847af7..2755966e3ae0f 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -3,7 +3,7 @@ title: Installation --- This guide is for reference only, please check the latest information on getting starting with Substrate -[here](https://docs.substrate.io/v3/getting-started/installation/). +[here](https://docs.substrate.io/main-docs/install/). This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first @@ -73,11 +73,11 @@ brew install openssl ### Windows -**_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! It is _highly_ +**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_ recommend to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). Please refer to the separate -[guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). +[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/). ## Rust developer environment diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 067c7ce2575a0..2c337146859ef 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -2,7 +2,7 @@ /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// +/// pub use pallet::*; #[cfg(test)] @@ -31,15 +31,15 @@ pub mod pallet { pub struct Pallet(_); // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + // https://docs.substrate.io/main-docs/build/runtime-storage/ #[pallet::storage] #[pallet::getter(fn something)] // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items + // https://docs.substrate.io/main-docs/build/runtime-storage/#declaring-storage-items pub type Something = StorageValue<_, u32>; // Pallets use events to inform users when important changes are made. 
- // https://docs.substrate.io/v3/runtime/events-and-errors + // https://docs.substrate.io/main-docs/build/events-errors/ #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -68,7 +68,7 @@ pub mod pallet { pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. - // https://docs.substrate.io/v3/runtime/origins + // https://docs.substrate.io/main-docs/build/origins/ let who = ensure_signed(origin)?; // Update storage. diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 54bf21674a8bd..40e208c2eba8d 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -265,7 +265,7 @@ pub trait StateApi { /// [substrate storage][1], [transparent keys in substrate][2], /// [querying substrate storage via rpc][3]. /// - /// [1]: https://docs.substrate.io/v3/advanced/storage#storage-map-keys + /// [1]: https://docs.substrate.io/main-docs/fundamentals/state-transitions-and-storage/ /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index d7b933577ead5..93e14f358208e 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_system::RawOrigin; // To actually run this benchmark on pallet-example-basic, we need to put this pallet into the // runtime and compile it with `runtime-benchmarks` feature. The detail procedures are // documented at: -// https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark +// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/ // // The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. // The exact command of how the estimate generated is printed at the top of the file. diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index f8acc1962388f..03dc2c613c01e 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -318,7 +318,7 @@ const MILLICENTS: u32 = 1_000_000_000; // - assigns a dispatch class `operational` if the argument of the call is more than 1000. // // More information can be read at: -// - https://docs.substrate.io/v3/runtime/weights-and-fees +// - https://docs.substrate.io/main-docs/build/tx-weights-fees/ // // Manually configuring weight is an advanced operation and what you really need may well be // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs index 60a376c5afca5..eac006054c1d5 100644 --- a/frame/remark/src/tests.rs +++ b/frame/remark/src/tests.rs @@ -30,11 +30,14 @@ fn generates_event() { System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. assert_ok!(Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),)); let events = System::events(); + // this one we create as we expect it let system_event: ::Event = Event::Stored { content_hash: sp_io::hashing::blake2_256(&data).into(), sender: caller, } .into(); + // this one we actually go into the system pallet and get the last event + // because we know its there from block +1 let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; assert_eq!(event, &system_event); }); diff --git a/frame/sudo/README.md b/frame/sudo/README.md index e8f688091e326..7342832d2d7a7 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -72,6 +72,6 @@ You need to set an initial superuser account as the sudo `key`. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html -[`Origin`]: https://docs.substrate.io/v3/runtime/origins +[`Origin`]: https://docs.substrate.io/main-docs/build/origins/ License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index d9e72b37f2970..a47b5a79bd017 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -88,7 +88,7 @@ //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Origin`]: https://docs.substrate.io/v3/runtime/origins +//! [`Origin`]: https://docs.substrate.io/main-docs/build/origins/ #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 124e8b312ce39..5a8487b09de5c 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -111,7 +111,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( #[doc = r" - Custom [dispatch errors](https://docs.substrate.io/v3/runtime/events-and-errors) + Custom [dispatch errors](https://docs.substrate.io/main-docs/build/events-errors/) of this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index acd60ab959c61..791f930302207 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -98,7 +98,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&event_item.attrs).is_empty() { event_item.attrs.push(syn::parse_quote!( #[doc = r" - The [event](https://docs.substrate.io/v3/runtime/events-and-errors) emitted + The [event](https://docs.substrate.io/main-docs/build/events-errors/) emitted by this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 83bef7a97af1f..c33d2386700b2 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -74,7 +74,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { def.item.attrs.push(syn::parse_quote!( #[doc = r" The module that hosts all the - [FRAME](https://docs.substrate.io/v3/runtime/frame) + [FRAME](https://docs.substrate.io/main-docs/build/events-errors/) types needed to add this pallet to a runtime. "] diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 52586a70a521a..a4a8acc10f799 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,7 +62,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://docs.substrate.io/v3/runtime/frame#pallets) implementing + The [pallet](https://docs.substrate.io/reference/frame-pallets/#pallets) implementing the on-chain logic. 
"] )); diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index ae4230efc63f8..2b05478e0b02a 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -306,7 +306,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// /// The following are reserved function signatures: /// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/v3/runtime/events-and-errors). +/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/main-docs/build/events-errors/). /// The default behavior is to call `deposit_event` from the [System /// module](../frame_system/index.html). However, you can write your own implementation for events /// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index ccbb47909d5f4..ddb7f6f41b378 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -763,7 +763,7 @@ impl MaxEncodedLen for WrapperOpaque { fn max_encoded_len() -> usize { let t_max_len = T::max_encoded_len(); - // See scale encoding https://docs.substrate.io/v3/advanced/scale-codec + // See scale encoding: https://docs.substrate.io/reference/scale-codec/ if t_max_len < 64 { t_max_len + 1 } else if t_max_len < 2usize.pow(14) { diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index 10d9fc608c273..d5f3f2da0d615 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -847,7 +847,7 @@ where fn max_encoded_len() -> usize { // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs index ed9f4bba62b55..a447e7285f906 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -443,7 +443,7 @@ where fn max_encoded_len() -> usize { // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) From 744eda47f545e31083ee722abb3a286f91a7f98d Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 3 Aug 2022 12:35:47 +0100 Subject: [PATCH 101/135] nit improvements to pallet template (#11968) --- bin/node-template/pallets/template/src/lib.rs | 8 ++++---- bin/node-template/pallets/template/src/mock.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 2c337146859ef..f1519efe062bd 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -19,6 +19,10 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + 
/// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { @@ -26,10 +30,6 @@ pub mod pallet { type Event: From> + IsType<::Event>; } - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - // The pallet's runtime storage items. // https://docs.substrate.io/main-docs/build/runtime-storage/ #[pallet::storage] diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 8721fe6c78851..e03f37b2eea69 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -17,8 +17,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, + System: frame_system, + TemplateModule: pallet_template, } ); From 614126401211afc95f43b62a80f83f45d38f9279 Mon Sep 17 00:00:00 2001 From: lucasvanmol Date: Thu, 4 Aug 2022 00:16:24 -0700 Subject: [PATCH 102/135] Update node-template's docker-compose.yml (#11802) * Update docker-compose.yml * Use production image Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: parity-processbot <> --- bin/node-template/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml index cfc4437bbae41..bc1922f47d963 100644 --- a/bin/node-template/docker-compose.yml +++ b/bin/node-template/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.2" services: dev: container_name: node-template - image: paritytech/ci-linux:974ba3ac-20201006 + image: paritytech/ci-linux:production working_dir: /var/www/node-template ports: - "9944:9944" From 317808ab8920e261840013cc0e522b47a6c0d57d Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Thu, 4 Aug 2022 15:47:52 +0800 Subject: [PATCH 103/135] Beefy: use VersionedFinalityProof instead of SignedCommitment (#11962) * beefy: use VersionedFinalityProof instead of SignedCommitment. * Change the exposed RPC API to support versioned proofs. Co-authored-by: Adrian Catangiu --- client/beefy/rpc/src/lib.rs | 51 ++++++++++---------- client/beefy/rpc/src/notification.rs | 12 ++--- client/beefy/src/import.rs | 26 +++++------ client/beefy/src/justification.rs | 53 ++++++++++++--------- client/beefy/src/lib.rs | 14 +++--- client/beefy/src/notification.rs | 21 +++++---- client/beefy/src/tests.rs | 50 +++++++++++--------- client/beefy/src/worker.rs | 70 +++++++++++++++------------- 8 files changed, 157 insertions(+), 140 deletions(-) diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 91ff59324bd95..3be182ceb8f39 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,7 +35,7 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}; +use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; mod notification; @@ -101,7 +101,7 @@ pub trait BeefyApi { /// Implements the BeefyApi RPC trait for interacting with BEEFY. 
pub struct Beefy { - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } @@ -112,7 +112,7 @@ where { /// Creates a new Beefy Rpc handler instance. pub fn new( - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -126,20 +126,21 @@ where }); executor.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); - Ok(Self { signed_commitment_stream, beefy_best_block, executor }) + Ok(Self { finality_proof_stream, beefy_best_block, executor }) } } #[async_trait] -impl BeefyApiServer for Beefy +impl BeefyApiServer + for Beefy where Block: BlockT, { fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { let stream = self - .signed_commitment_stream + .finality_proof_stream .subscribe() - .map(|sc| notification::EncodedSignedCommitment::new::(sc)); + .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); let fut = async move { sink.pipe_from_stream(stream).await; @@ -164,31 +165,31 @@ mod tests { use super::*; use beefy_gadget::{ - justification::BeefySignedCommitment, - notification::{BeefyBestBlockStream, BeefySignedCommitmentSender}, + justification::BeefyVersionedFinalityProof, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; - use beefy_primitives::{known_payload_ids, Payload}; + use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) { + fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefySignedCommitmentSender) { - let (commitment_sender, commitment_stream) = - BeefySignedCommitmentStream::::channel(); + ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { + let (finality_proof_sender, finality_proof_stream) = + BeefyVersionedFinalityProofStream::::channel(); let handler = - Beefy::new(commitment_stream, best_block_stream, sc_rpc::testing::test_executor()) + Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) .expect("Setting up the BEEFY RPC handler works"); - (handler.into_rpc(), commitment_sender) + (handler.into_rpc(), finality_proof_sender) } #[tokio::test] @@ -262,21 +263,21 @@ mod tests { assert_eq!(response.result, expected); } - fn create_commitment() -> BeefySignedCommitment { + fn create_finality_proof() -> BeefyVersionedFinalityProof { let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); - BeefySignedCommitment:: { + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: beefy_primitives::Commitment { payload, block_number: 5, validator_set_id: 0, }, signatures: vec![], - } + }) } #[tokio::test] async fn subscribe_and_listen_to_one_justification() { - let (rpc, commitment_sender) = setup_io_handler(); + let (rpc, finality_proof_sender) = setup_io_handler(); // Subscribe let mut sub = rpc @@ -284,16 +285,16 @@ mod tests { .await .unwrap(); - // Notify 
with commitment - let commitment = create_commitment(); - let r: Result<(), ()> = commitment_sender.notify(|| Ok(commitment.clone())); + // Notify with finality_proof + let finality_proof = create_finality_proof(); + let r: Result<(), ()> = finality_proof_sender.notify(|| Ok(finality_proof.clone())); r.unwrap(); // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_commitment: BeefySignedCommitment = + let recv_finality_proof: BeefyVersionedFinalityProof = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); - assert_eq!(recv_commitment, commitment); + assert_eq!(recv_finality_proof, finality_proof); } } diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs index cdda667782dd5..a815425644d52 100644 --- a/client/beefy/rpc/src/notification.rs +++ b/client/beefy/rpc/src/notification.rs @@ -21,19 +21,19 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::Block as BlockT; -/// An encoded signed commitment proving that the given header has been finalized. +/// An encoded finality proof proving that the given header has been finalized. /// The given bytes should be the SCALE-encoded representation of a -/// `beefy_primitives::SignedCommitment`. +/// `beefy_primitives::VersionedFinalityProof`. #[derive(Clone, Serialize, Deserialize)] -pub struct EncodedSignedCommitment(sp_core::Bytes); +pub struct EncodedVersionedFinalityProof(sp_core::Bytes); -impl EncodedSignedCommitment { +impl EncodedVersionedFinalityProof { pub fn new( - signed_commitment: beefy_gadget::justification::BeefySignedCommitment, + finality_proof: beefy_gadget::justification::BeefyVersionedFinalityProof, ) -> Self where Block: BlockT, { - EncodedSignedCommitment(signed_commitment.encode().into()) + EncodedVersionedFinalityProof(finality_proof.encode().into()) } } diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 7caeb49db5e50..129484199de89 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use beefy_primitives::{crypto::Signature, BeefyApi, VersionedFinalityProof, BEEFY_ENGINE_ID}; +use beefy_primitives::{BeefyApi, BEEFY_ENGINE_ID}; use codec::Encode; use log::error; use std::{collections::HashMap, sync::Arc}; @@ -34,7 +34,8 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ - justification::decode_and_verify_commitment, notification::BeefySignedCommitmentSender, + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + notification::BeefyVersionedFinalityProofSender, }; /// A block-import handler for BEEFY. 
@@ -47,7 +48,7 @@ pub struct BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefySignedCommitmentSender, + justification_sender: BeefyVersionedFinalityProofSender, } impl Clone for BeefyBlockImport { @@ -67,7 +68,7 @@ impl BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefySignedCommitmentSender, + justification_sender: BeefyVersionedFinalityProofSender, ) -> BeefyBlockImport { BeefyBlockImport { backend, runtime, inner, justification_sender } } @@ -85,7 +86,7 @@ where encoded: &EncodedJustification, number: NumberFor, hash: ::Hash, - ) -> Result, Signature>, ConsensusError> { + ) -> Result, ConsensusError> { let block_id = BlockId::hash(hash); let validator_set = self .runtime @@ -94,7 +95,7 @@ where .map_err(|e| ConsensusError::ClientImport(e.to_string()))? .ok_or_else(|| ConsensusError::ClientImport("Unknown validator set".to_string()))?; - decode_and_verify_commitment::(&encoded[..], number, &validator_set) + decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) } /// Import BEEFY justification: Send it to worker for processing and also append it to backend. @@ -105,7 +106,7 @@ where fn import_beefy_justification_unchecked( &self, number: NumberFor, - justification: VersionedFinalityProof, Signature>, + justification: BeefyVersionedFinalityProof, ) { // Append the justification to the block in the backend. if let Err(e) = self.backend.append_justification( @@ -115,14 +116,9 @@ where error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification); } // Send the justification to the BEEFY voter for processing. - match justification { - // TODO #11838: Should not unpack, these channels should also use - // `VersionedFinalityProof`. - VersionedFinalityProof::V1(signed_commitment) => self - .justification_sender - .notify(|| Ok::<_, ()>(signed_commitment)) - .expect("forwards closure result; the closure always returns Ok; qed."), - }; + self.justification_sender + .notify(|| Ok::<_, ()>(justification)) + .expect("forwards closure result; the closure always returns Ok; qed."); } } diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index 2a5191daec4b5..d9be18593dac7 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -25,17 +25,17 @@ use codec::{Decode, Encode}; use sp_consensus::Error as ConsensusError; use sp_runtime::traits::{Block as BlockT, NumberFor}; -/// A commitment with matching BEEFY authorities' signatures. -pub type BeefySignedCommitment = - beefy_primitives::SignedCommitment, beefy_primitives::crypto::Signature>; +/// A finality proof with matching BEEFY authorities' signatures. +pub type BeefyVersionedFinalityProof = + beefy_primitives::VersionedFinalityProof, Signature>; -/// Decode and verify a Beefy SignedCommitment. -pub(crate) fn decode_and_verify_commitment( +/// Decode and verify a Beefy FinalityProof. 
+pub(crate) fn decode_and_verify_finality_proof( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, Signature>, ConsensusError> { - let proof = , Signature>>::decode(&mut &*encoded) +) -> Result, ConsensusError> { + let proof = >::decode(&mut &*encoded) .map_err(|_| ConsensusError::InvalidJustification)?; verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) } @@ -44,7 +44,7 @@ pub(crate) fn decode_and_verify_commitment( fn verify_with_validator_set( target_number: NumberFor, validator_set: &ValidatorSet, - proof: &VersionedFinalityProof, Signature>, + proof: &BeefyVersionedFinalityProof, ) -> Result<(), ConsensusError> { match proof { VersionedFinalityProof::V1(signed_commitment) => { @@ -80,17 +80,19 @@ fn verify_with_validator_set( #[cfg(test)] pub(crate) mod tests { - use beefy_primitives::{known_payload_ids, Commitment, Payload, SignedCommitment}; + use beefy_primitives::{ + known_payload_ids, Commitment, Payload, SignedCommitment, VersionedFinalityProof, + }; use substrate_test_runtime_client::runtime::Block; use super::*; use crate::{keystore::tests::Keyring, tests::make_beefy_ids}; - pub(crate) fn new_signed_commitment( + pub(crate) fn new_finality_proof( block_num: NumberFor, validator_set: &ValidatorSet, keys: &[Keyring], - ) -> BeefySignedCommitment { + ) -> BeefyVersionedFinalityProof { let commitment = Commitment { payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, @@ -98,7 +100,7 @@ pub(crate) mod tests { }; let message = commitment.encode(); let signatures = keys.iter().map(|key| Some(key.sign(&message))).collect(); - SignedCommitment { commitment, signatures } + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }) } #[test] @@ -108,7 +110,7 @@ pub(crate) mod tests { // build valid justification let block_num = 42; - let proof = new_signed_commitment(block_num, &validator_set, keys); + let proof = new_finality_proof(block_num, &validator_set, keys); let good_proof = proof.clone().into(); // should verify successfully @@ -132,7 +134,10 @@ pub(crate) mod tests { // wrong signatures length -> should fail verification let mut bad_proof = proof.clone(); // change length of signatures - bad_proof.signatures.pop().flatten().unwrap(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; + bad_signed_commitment.signatures.pop().flatten().unwrap(); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -140,8 +145,11 @@ pub(crate) mod tests { // not enough signatures -> should fail verification let mut bad_proof = proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; // remove a signature (but same length) - *bad_proof.signatures.first_mut().unwrap() = None; + *bad_signed_commitment.signatures.first_mut().unwrap() = None; match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -149,9 +157,12 @@ pub(crate) mod tests { // not enough _correct_ signatures -> should fail verification let mut bad_proof = proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; // change a signature to a different key 
- *bad_proof.signatures.first_mut().unwrap() = - Some(Keyring::Dave.sign(&proof.commitment.encode())); + *bad_signed_commitment.signatures.first_mut().unwrap() = + Some(Keyring::Dave.sign(&bad_signed_commitment.commitment.encode())); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -159,19 +170,19 @@ pub(crate) mod tests { } #[test] - fn should_decode_and_verify_commitment() { + fn should_decode_and_verify_finality_proof() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let block_num = 1; // build valid justification - let proof = new_signed_commitment(block_num, &validator_set, keys); - let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); + let proof = new_finality_proof(block_num, &validator_set, keys); + let versioned_proof: BeefyVersionedFinalityProof = proof.into(); let encoded = versioned_proof.encode(); // should successfully decode and verify let verified = - decode_and_verify_commitment::(&encoded, block_num, &validator_set).unwrap(); + decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); assert_eq!(verified, versioned_proof); } } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 81c72dec8cd08..bdf44e056786c 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -46,8 +46,8 @@ mod tests; use crate::{ import::BeefyBlockImport, notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefySignedCommitmentSender, - BeefySignedCommitmentStream, + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, }, }; @@ -121,11 +121,11 @@ where pub struct BeefyVoterLinks { // BlockImport -> Voter links /// Stream of BEEFY signed commitments from block import to voter. - pub from_block_import_justif_stream: BeefySignedCommitmentStream, + pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, // Voter -> RPC links /// Sends BEEFY signed commitments from voter to RPC. - pub to_rpc_justif_sender: BeefySignedCommitmentSender, + pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, /// Sends BEEFY best block hashes from voter to RPC. pub to_rpc_best_block_sender: BeefyBestBlockSender, } @@ -134,7 +134,7 @@ pub struct BeefyVoterLinks { #[derive(Clone)] pub struct BeefyRPCLinks { /// Stream of signed commitments coming from the voter. - pub from_voter_justif_stream: BeefySignedCommitmentStream, + pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, /// Stream of BEEFY best block hashes coming from the voter. 
pub from_voter_best_beefy_stream: BeefyBestBlockStream, } @@ -156,13 +156,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - notification::BeefySignedCommitmentStream::::channel(); + notification::BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = notification::BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - notification::BeefySignedCommitmentStream::::channel(); + notification::BeefyVersionedFinalityProofStream::::channel(); // BlockImport let import = diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs index 9479891714234..c673115e487f3 100644 --- a/client/beefy/src/notification.rs +++ b/client/beefy/src/notification.rs @@ -19,7 +19,7 @@ use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr}; use sp_runtime::traits::Block as BlockT; -use crate::justification::BeefySignedCommitment; +use crate::justification::BeefyVersionedFinalityProof; /// The sending half of the notifications channel(s) used to send /// notifications about best BEEFY block from the gadget side. @@ -31,13 +31,14 @@ pub type BeefyBestBlockStream = NotificationStream<::Hash, BeefyBestBlockTracingKey>; /// The sending half of the notifications channel(s) used to send notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentSender = NotificationSender>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofSender = + NotificationSender>; /// The receiving half of a notifications channel used to receive notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentStream = - NotificationStream, BeefySignedCommitmentTracingKey>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofStream = + NotificationStream, BeefyVersionedFinalityProofTracingKey>; /// Provides tracing key for BEEFY best block stream. #[derive(Clone)] @@ -46,9 +47,9 @@ impl TracingKeyStr for BeefyBestBlockTracingKey { const TRACING_KEY: &'static str = "mpsc_beefy_best_block_notification_stream"; } -/// Provides tracing key for BEEFY signed commitments stream. +/// Provides tracing key for BEEFY versioned finality proof stream. 
#[derive(Clone)] -pub struct BeefySignedCommitmentTracingKey; -impl TracingKeyStr for BeefySignedCommitmentTracingKey { - const TRACING_KEY: &'static str = "mpsc_beefy_signed_commitments_notification_stream"; +pub struct BeefyVersionedFinalityProofTracingKey; +impl TracingKeyStr for BeefyVersionedFinalityProofTracingKey { + const TRACING_KEY: &'static str = "mpsc_beefy_versioned_finality_proof_notification_stream"; } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 9c8f443dd1f7e..134339009302b 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -397,17 +397,18 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, peers: &[BeefyKeyring], -) -> (Vec>, Vec>>) { +) -> (Vec>, Vec>>) +{ let mut best_block_streams = Vec::new(); - let mut signed_commitment_streams = Vec::new(); + let mut versioned_finality_proof_streams = Vec::new(); for peer_id in 0..peers.len() { let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); - signed_commitment_streams.push(from_voter_justif_stream.subscribe()); + versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); } - (best_block_streams, signed_commitment_streams) + (best_block_streams, versioned_finality_proof_streams) } fn wait_for_best_beefy_blocks( @@ -437,7 +438,7 @@ fn wait_for_best_beefy_blocks( } fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, runtime: &mut Runtime, expected_commitment_block_nums: &[u64], @@ -446,9 +447,12 @@ fn wait_for_beefy_signed_commitments( let len = expected_commitment_block_nums.len(); streams.into_iter().for_each(|stream| { let mut expected = expected_commitment_block_nums.iter(); - wait_for.push(Box::pin(stream.take(len).for_each(move |signed_commitment| { + wait_for.push(Box::pin(stream.take(len).for_each(move |versioned_finality_proof| { let expected = expected.next(); async move { + let signed_commitment = match versioned_finality_proof { + beefy_primitives::VersionedFinalityProof::V1(sc) => sc, + }; let commitment_block_num = signed_commitment.commitment.block_number; assert_eq!(expected, Some(commitment_block_num).as_ref()); // TODO: also verify commitment payload, validator set id, and signatures. 
@@ -486,7 +490,7 @@ fn finalize_block_and_wait_for_beefy( finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); for block in finalize_targets { let finalize = BlockId::number(*block); @@ -499,11 +503,11 @@ fn finalize_block_and_wait_for_beefy( // run for quarter second then verify no new best beefy block available let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, runtime, None); } else { // run until expected beefy blocks are received wait_for_best_beefy_blocks(best_blocks, &net, runtime, expected_beefy); - wait_for_beefy_signed_commitments(signed_commitments, &net, runtime, expected_beefy); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, runtime, expected_beefy); } } @@ -574,19 +578,19 @@ fn lagging_validators() { // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[23, 24, 25]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); @@ -596,20 +600,20 @@ fn lagging_validators() { // validator set). 
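A detail these tests rely on: every call to `get_beefy_streams` takes fresh subscriptions, so a returned stream only observes notifications sent after that call. A sketch of the underlying semantics, with hypothetical `stream`/`sender` handles:

    let early = stream.subscribe(); // sees everything from this point on
    sender.notify(|| Ok::<_, ()>(a)).unwrap();
    let late = stream.subscribe(); // does not replay `a`; only later notifications

This is why the streams are re-fetched before each finalization step in the tests that follow.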
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[60]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } #[test] @@ -647,7 +651,7 @@ fn correct_beefy_payload() { // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); - let (best_blocks, signed_commitments) = + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); // now 2 good validators and 1 bad one are voting @@ -673,10 +677,10 @@ fn correct_beefy_payload() { // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // 3rd good validator catches up and votes as well - let (best_blocks, signed_commitments) = + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); net.lock() .peer(2) @@ -687,7 +691,7 @@ fn correct_beefy_payload() { // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[11]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[11]); } #[test] @@ -746,7 +750,7 @@ fn beefy_importing_blocks() { let block_num = 2; let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); + let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); @@ -781,7 +785,7 @@ fn beefy_importing_blocks() { let block_num = 3; let keys = &[BeefyKeyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); 
+ let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys);
 let versioned_proof: VersionedFinalityProof, Signature> = proof.into();
 let encoded = versioned_proof.encode();
 let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded)));
diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs
index 3bff0822ebdb4..2c4985c0e6966 100644
--- a/client/beefy/src/worker.rs
+++ b/client/beefy/src/worker.rs
@@ -49,7 +49,7 @@ use beefy_primitives::{
 use crate::{
 error::Error,
 gossip::{topic, GossipValidator},
- justification::BeefySignedCommitment,
+ justification::BeefyVersionedFinalityProof,
 keystore::BeefyKeystore,
 metric_inc, metric_set,
 metrics::Metrics,
@@ -212,7 +212,7 @@ pub(crate) struct BeefyWorker {
 /// Buffer holding votes for future processing.
 pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>,
 /// Buffer holding justifications for future processing.
- pending_justifications: BTreeMap, Vec>>,
+ pending_justifications: BTreeMap, Vec>>,
 /// Chooses which incoming votes to accept and which votes to generate.
 voting_oracle: VoterOracle,
 }
@@ -381,9 +381,12 @@ where
 /// Expects `justification` to be valid.
 fn triage_incoming_justif(
 &mut self,
- justification: BeefySignedCommitment,
+ justification: BeefyVersionedFinalityProof,
 ) -> Result<(), Error> {
- let block_num = justification.commitment.block_number;
+ let signed_commitment = match justification {
+ VersionedFinalityProof::V1(ref sc) => sc,
+ };
+ let block_num = signed_commitment.commitment.block_number;
 let best_grandpa = *self.best_grandpa_block_header.number();
 match self.voting_oracle.triage_round(block_num, best_grandpa)? {
 RoundAction::Process => self.finalize(justification),
@@ -417,39 +420,39 @@ where
 validator_set_id: rounds.validator_set_id(),
 };
- let signed_commitment = SignedCommitment { commitment, signatures };
+ let finality_proof =
+ VersionedFinalityProof::V1(SignedCommitment { commitment, signatures });
 metric_set!(self, beefy_round_concluded, block_num);
- info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment);
+ info!(target: "beefy", "🥩 Round #{} concluded, finality_proof: {:?}.", round.1, finality_proof);
 if let Err(e) = self.backend.append_justification(
 BlockId::Number(block_num),
- (
- BEEFY_ENGINE_ID,
- VersionedFinalityProof::V1(signed_commitment.clone()).encode(),
- ),
+ (BEEFY_ENGINE_ID, finality_proof.clone().encode()),
 ) {
- debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, signed_commitment);
+ debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof);
 }
- // We created the `signed_commitment` and know to be valid.
- self.finalize(signed_commitment);
+ // We created the `finality_proof` and know it to be valid.
+ self.finalize(finality_proof);
 }
 }
 Ok(())
 }
- /// Provide BEEFY finality for block based on `signed_commitment`:
+ /// Provide BEEFY finality for block based on `finality_proof`:
 /// 1. Prune irrelevant past sessions from the oracle,
 /// 2. Set BEEFY best block,
- /// 3. Send best block hash and `signed_commitment` to RPC worker.
+ /// 3. Send best block hash and `finality_proof` to RPC worker.
 ///
- /// Expects `signed commitment` to be valid.
- fn finalize(&mut self, signed_commitment: BeefySignedCommitment) {
+ /// Expects `finality proof` to be valid.
+ fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) {
 // Prune any now "finalized" sessions from queue.
self.voting_oracle.try_prune(); - + let signed_commitment = match finality_proof { + VersionedFinalityProof::V1(ref sc) => sc, + }; let block_num = signed_commitment.commitment.block_number; if Some(block_num) > self.best_beefy_block { // Set new best BEEFY block number. @@ -465,7 +468,7 @@ where self.links .to_rpc_justif_sender - .notify(|| Ok::<_, ()>(signed_commitment)) + .notify(|| Ok::<_, ()>(finality_proof)) .expect("forwards closure result; the closure always returns Ok; qed."); } else { debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); @@ -832,7 +835,7 @@ pub(crate) mod tests { use super::*; use crate::{ keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, @@ -859,10 +862,11 @@ pub(crate) mod tests { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefySignedCommitmentStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); - let (_, from_block_import_justif_stream) = BeefySignedCommitmentStream::::channel(); + let (_, from_block_import_justif_stream) = + BeefyVersionedFinalityProofStream::::channel(); let beefy_rpc_links = BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; @@ -1168,37 +1172,37 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); + let mut finality_proof = finality_proofs.drain(..).next().unwrap(); - let create_signed_commitment = |block_num: NumberFor| { + let create_finality_proof = |block_num: NumberFor| { let commitment = Commitment { payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; - SignedCommitment { commitment, signatures: vec![None] } + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures: vec![None] }) }; - // no 'best beefy block' or signed commitments + // no 'best beefy block' or finality proofs assert_eq!(worker.best_beefy_block, None); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); - assert_eq!(signed_commitments.poll_next_unpin(cx), Poll::Pending); + assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending); Poll::Ready(()) })); // unknown hash for block #1 - let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); - let justif = create_signed_commitment(1); + let mut finality_proof = finality_proofs.drain(..).next().unwrap(); + let justif = create_finality_proof(1); worker.finalize(justif.clone()); assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { 
assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending);
- match signed_commitments.poll_next_unpin(cx) {
+ match finality_proof.poll_next_unpin(cx) {
 // expect justification
 Poll::Ready(Some(received)) => assert_eq!(received, justif),
 v => panic!("unexpected value: {:?}", v),
@@ -1211,7 +1215,7 @@ pub(crate) mod tests {
 let mut best_block_stream = best_block_streams.drain(..).next().unwrap();
 net.generate_blocks(2, 10, &validator_set, false);

- let justif = create_signed_commitment(2);
+ let justif = create_finality_proof(2);
 worker.finalize(justif);
 assert_eq!(worker.best_beefy_block, Some(2));
 block_on(poll_fn(move |cx| {

From 9ece574bd1472275eed212acf8c69ae49c38f57a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Thu, 4 Aug 2022 11:34:19 +0200
Subject: [PATCH 104/135] Timestamp: `set_timestamp` sets `DidUpdate` (#11960)

* Timestamp: `set_timestamp` sets `DidUpdate`

The Timestamp pallet provides a `set_timestamp` function for setting the
current timestamp. The problem is that it doesn't set `DidUpdate`. This
results in `on_finalize` panicking. There is no real reason why the
function doesn't also set `DidUpdate`.

* Update frame/timestamp/src/lib.rs

Co-authored-by: Oliver Tale-Yazdi

* Fix Babe tests

Co-authored-by: Oliver Tale-Yazdi
---
 frame/babe/src/mock.rs | 2 +-
 frame/babe/src/tests.rs | 2 +-
 frame/timestamp/src/lib.rs | 2 ++
 frame/timestamp/src/tests.rs | 5 ++---
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs
index 5677eb7e28e49..c2ba3c2be06d8 100644
--- a/frame/babe/src/mock.rs
+++ b/frame/babe/src/mock.rs
@@ -429,7 +429,7 @@ pub fn generate_equivocation_proof(
 System::reset_events();
 System::initialize(&current_block, &parent_hash, &pre_digest);
 System::set_block_number(current_block);
- Timestamp::set_timestamp(current_block);
+ Timestamp::set_timestamp(*current_slot * Babe::slot_duration());
 System::finalize()
 };

diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs
index 0859bb7a40849..ece0883387709 100644
--- a/frame/babe/src/tests.rs
+++ b/frame/babe/src/tests.rs
@@ -659,7 +659,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 equivocation_proof.second_header = equivocation_proof.first_header.clone();
 assert_invalid_equivocation(equivocation_proof);

- // missing preruntime digest from one header
+ // missing pre-runtime digest from one header
 let mut equivocation_proof = generate_equivocation_proof(
 offending_validator_index as u32,
 &offending_authority_pair,

diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs
index 81ed67913c2e6..6a7f849d1329a 100644
--- a/frame/timestamp/src/lib.rs
+++ b/frame/timestamp/src/lib.rs
@@ -282,6 +282,8 @@ impl Pallet {
 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
 pub fn set_timestamp(now: T::Moment) {
 Now::::put(now);
+ DidUpdate::::put(true);
+ >::on_timestamp_set(now);
 }
 }

diff --git a/frame/timestamp/src/tests.rs b/frame/timestamp/src/tests.rs
index f52ba7849c951..ef9fd6e39d4b5 100644
--- a/frame/timestamp/src/tests.rs
+++ b/frame/timestamp/src/tests.rs
@@ -23,7 +23,7 @@ use frame_support::assert_ok;
 #[test]
 fn timestamp_works() {
 new_test_ext().execute_with(|| {
- Timestamp::set_timestamp(42);
+ crate::Now::::put(46);
 assert_ok!(Timestamp::set(Origin::none(), 69));
 assert_eq!(Timestamp::now(), 69);
 assert_eq!(Some(69), get_captured_moment());
@@ -36,7 +36,6 @@ fn double_timestamp_should_fail() {
 new_test_ext().execute_with(|| {
 Timestamp::set_timestamp(42);
 assert_ok!(Timestamp::set(Origin::none(), 69));
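The test updates in this hunk follow from the behavioral change above: `set_timestamp` now also writes `DidUpdate`, so a test that only wants to seed a previous timestamp, without marking the current block as updated, has to write `Now` directly. A sketch of the distinction, assuming `Test` is the mock runtime and that `Timestamp::set` asserts the timestamp is set at most once per block:

    // Seeds the moment *and* sets `DidUpdate`; a following `Timestamp::set`
    // in the same block will now panic.
    Timestamp::set_timestamp(42);

    // Only seeds the stored moment; `DidUpdate` stays unset, so `Timestamp::set`
    // may still run once (subject to the minimum-period check against 42).
    crate::Now::<Test>::put(42);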
- let _ = Timestamp::set(Origin::none(), 70);
 });
 }

@@ -46,7 +45,7 @@ fn double_timestamp_should_fail()
 )]
 fn block_period_minimum_enforced() {
 new_test_ext().execute_with(|| {
- Timestamp::set_timestamp(42);
+ crate::Now::::put(44);
 let _ = Timestamp::set(Origin::none(), 46);
 });
 }

From d09f5d92677c460bbb8b9b3e084bd9f3c1ffaf4c Mon Sep 17 00:00:00 2001
From: Davide Galassi
Date: Thu, 4 Aug 2022 22:57:05 +0200
Subject: [PATCH 105/135] Prevent duplicated leaves in the backend (#11941)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Prevent duplicated leaves in the backend

* Comments...

* Use highest known leaf as a shortcut for detecting non-existing headers

* Apply code review suggestion

Co-authored-by: Bastian Köcher

Co-authored-by: Bastian Köcher
---
 client/db/src/lib.rs | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs
index e14d3a26aa557..0ce408a39b004 100644
--- a/client/db/src/lib.rs
+++ b/client/db/src/lib.rs
@@ -1345,8 +1345,15 @@ impl Backend {
 let parent_hash = *pending_block.header.parent_hash();
 let number = *pending_block.header.number();
+ let highest_leaf = self
+ .blockchain
+ .leaves
+ .read()
+ .highest_leaf()
+ .map(|(n, _)| n)
+ .unwrap_or(Zero::zero());
 let existing_header =
- number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some();
+ number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some();

 // blocks are keyed by number + hash.
 let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
@@ -3534,7 +3541,24 @@ pub(crate) mod tests {
 header.hash()
 };

- let block3_fork = insert_header_no_head(&backend, 3, block2, Default::default());
+ let block3_fork = {
+ let mut op = backend.begin_operation().unwrap();
+ backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap();
+ let header = Header {
+ number: 3,
+ parent_hash: block2,
+ state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
+ digest: Default::default(),
+ extrinsics_root: H256::from_low_u64_le(42),
+ };
+
+ op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
+ .unwrap();
+
+ backend.commit_operation(op).unwrap();
+
+ header.hash()
+ };

 assert!(backend.have_state_at(&block1, 1));
 assert!(backend.have_state_at(&block2, 2));
@@ -3556,4 +3580,20 @@ pub(crate) mod tests {
 assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
 assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
 }
+
+ #[test]
+ fn test_no_duplicated_leaves_allowed() {
+ let backend: Backend = Backend::new_test(10, 10);
+ let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
+ let block1 = insert_header(&backend, 1, block0, None, Default::default());
+ // Add block 2 not as the best block
+ let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
+ assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
+ assert_eq!(backend.blockchain().info().best_hash, block1);
+
+ // Add block 2 as the best block
+ let block2 = insert_header(&backend, 2, block1, None, Default::default());
+ assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
+ assert_eq!(backend.blockchain().info().best_hash, block2);
+ }
 }

From 6527f0cc6e14f750d83a37f468e65a17d4088854 Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 5 Aug 2022 09:50:57 +0300
Subject: [PATCH 106/135] Change on-the-wire protocol names to include
genesis hash & fork id (#11938) * Rename transactions protocol to include genesis hash * Add protocol name generation to sc_network::utils * Use utils functions for transactions protocol name generation * Extract protocol name generation into public module * Use sc_network::protocol_name::standard_protocol_name() for BEEFY and GRANDPA * minor: add missing newline at EOF * Change block-announces protocol name to include genesis_hash & fork_id * Change protocol names to include genesis hash and fork id Protocols changed: - sync - state - light - sync/warp * Revert "Use sc_network::protocol_name::standard_protocol_name() for BEEFY and GRANDPA" This reverts commit 29aa556ef947e2cfd48264db2a9fc8bc528d7ddb. * Get rid of `protocol_name` module --- Cargo.lock | 2 + .../network/common/src/request_responses.rs | 3 ++ client/network/light/Cargo.toml | 1 + .../light/src/light_client_requests.rs | 23 +++++++++-- .../src/light_client_requests/handler.rs | 20 ++++++++-- client/network/src/config.rs | 6 ++- client/network/src/protocol.rs | 18 +++++++-- client/network/src/request_responses.rs | 10 ++++- client/network/src/service.rs | 13 +++++- client/network/src/service/tests.rs | 11 +++-- client/network/src/transactions.rs | 21 ++++++++-- client/network/sync/Cargo.toml | 1 + .../network/sync/src/block_request_handler.rs | 34 +++++++++++++--- .../network/sync/src/state_request_handler.rs | 40 +++++++++++++++---- .../network/sync/src/warp_request_handler.rs | 30 +++++++++++--- client/network/test/src/lib.rs | 21 +++++++--- client/service/src/builder.rs | 22 ++++++++-- 17 files changed, 226 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec5c5267142a9..703eeffce54a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8435,6 +8435,7 @@ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ "futures", + "hex", "libp2p", "log", "parity-scale-codec", @@ -8455,6 +8456,7 @@ version = "0.10.0-dev" dependencies = [ "fork-tree", "futures", + "hex", "libp2p", "log", "lru", diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index 71570e6beb864..f409d1ac16d61 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -29,6 +29,9 @@ pub struct ProtocolConfig { /// Name of the protocol on the wire. Should be something like `/foo/bar`. pub name: Cow<'static, str>, + /// Fallback on the wire protocol names to support. + pub fallback_names: Vec>, + /// Maximum allowed size, in bytes, of a request. /// /// Any request larger than this value will be declined as a way to avoid allocating too diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index 0037177fb4046..c1a0fb4759320 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +hex = "0.4.0" libp2p = "0.46.1" log = "0.4.16" prost = "0.10" diff --git a/client/network/light/src/light_client_requests.rs b/client/network/light/src/light_client_requests.rs index 9eccef41e833d..b58426cf15992 100644 --- a/client/network/light/src/light_client_requests.rs +++ b/client/network/light/src/light_client_requests.rs @@ -25,16 +25,31 @@ use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; use std::time::Duration; -/// Generate the light client protocol name from chain specific protocol identifier. 
-fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the light client protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/light/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/light/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy light client protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/light/2", protocol_id.as_ref()) } /// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming /// requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1 * 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(15), diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 3c87ccfd6ed9f..727a9b0d7e820 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -28,7 +28,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::PeerId; use log::{debug, trace}; use prost::Message; -use sc_client_api::{ProofProvider, StorageProof}; +use sc_client_api::{BlockBackend, ProofProvider, StorageProof}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -54,15 +54,27 @@ pub struct LightClientRequestHandler { impl LightClientRequestHandler where B: Block, - Client: ProofProvider + Send + Sync + 'static, + Client: BlockBackend + ProofProvider + Send + Sync + 'static, { /// Create a new [`LightClientRequestHandler`]. - pub fn new(protocol_id: &ProtocolId, client: Arc) -> (Self, ProtocolConfig) { + pub fn new( + protocol_id: &ProtocolId, + fork_id: Option<&str>, + client: Arc, + ) -> (Self, ProtocolConfig) { // For now due to lack of data on light client request handling in production systems, this // value is chosen to match the block request limit. let (tx, request_receiver) = mpsc::channel(20); - let mut protocol_config = super::generate_protocol_config(protocol_id); + let mut protocol_config = super::generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); (Self { client, request_receiver, _block: PhantomData::default() }, protocol_config) diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 2622762da5fc9..afd61cae6c83b 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -87,9 +87,13 @@ where /// the network. pub transaction_pool: Arc>, - /// Name of the protocol to use on the wire. Should be different for each chain. + /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, + /// Fork ID to distinguish protocols of different hard forks. Part of the standard protocol + /// name on the wire. 
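To make the naming scheme shared by these handlers concrete, here is a small self-contained sketch of the pattern (`name` is a hypothetical helper, and a toy 2-byte hash stands in for the chain's real 32-byte genesis hash):

    fn name(genesis_hash: &[u8], fork_id: Option<&str>, proto: &str) -> String {
        match fork_id {
            Some(fork_id) => format!("/{}/{}/{}", hex::encode(genesis_hash), fork_id, proto),
            None => format!("/{}/{}", hex::encode(genesis_hash), proto),
        }
    }

    assert_eq!(name(&[0xab, 0xcd], None, "light/2"), "/abcd/light/2");
    assert_eq!(name(&[0xab, 0xcd], Some("test"), "light/2"), "/abcd/test/light/2");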
+ pub fork_id: Option, + /// Import queue to use. /// /// The import queue is the component that verifies that blocks received from other nodes are diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1c933fabcbb5d..a06fff2322ce6 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -276,6 +276,7 @@ where roles: Roles, chain: Arc, protocol_id: ProtocolId, + fork_id: &Option, network_config: &config::NetworkConfiguration, notifications_protocols_handshakes: Vec>, metrics_registry: Option<&Registry>, @@ -371,8 +372,17 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; - let block_announces_protocol: Cow<'static, str> = - format!("/{}/block-announces/1", protocol_id.as_ref()).into(); + let block_announces_protocol = { + let genesis_hash = + chain.block_hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); + if let Some(fork_id) = fork_id { + format!("/{}/{}/block-announces/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/block-announces/1", hex::encode(genesis_hash)) + } + }; + + let legacy_ba_protocol_name = format!("/{}/block-announces/1", protocol_id.as_ref()); let behaviour = { let best_number = info.best_number; @@ -384,8 +394,8 @@ where .encode(); let sync_protocol_config = notifications::ProtocolConfig { - name: block_announces_protocol, - fallback_names: Vec::new(), + name: block_announces_protocol.into(), + fallback_names: iter::once(legacy_ba_protocol_name.into()).collect(), handshake: block_announces_handshake, max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, }; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index cec4aa2a07fba..0d8c6c33b1c95 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -220,7 +220,9 @@ impl RequestResponsesBehaviour { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), + iter::once(protocol.name.as_bytes().to_vec()) + .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) + .zip(iter::repeat(protocol_support)), cfg, ); @@ -1027,6 +1029,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1127,6 +1130,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 8, // <-- important for the test request_timeout: Duration::from_secs(30), @@ -1223,6 +1227,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1230,6 +1235,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1247,6 +1253,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1254,6 +1261,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), 
max_request_size: 1024,
 max_response_size: 1024 * 1024,
 request_timeout: Duration::from_secs(30),
diff --git a/client/network/src/service.rs b/client/network/src/service.rs
index 409ed88c75c00..1210b0ca64224 100644
--- a/client/network/src/service.rs
+++ b/client/network/src/service.rs
@@ -224,8 +224,16 @@ where
 fs::create_dir_all(path)?;
 }

- let transactions_handler_proto =
- transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone());
+ let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new(
+ params.protocol_id.clone(),
+ params
+ .chain
+ .block_hash(0u32.into())
+ .ok()
+ .flatten()
+ .expect("Genesis block exists; qed"),
+ params.fork_id.clone(),
+ );
 params
 .network_config
 .extra_sets
@@ -243,6 +251,7 @@ where
 From::from(&params.role),
 params.chain.clone(),
 params.protocol_id.clone(),
+ &params.fork_id,
 &params.network_config,
 iter::once(Vec::new())
 .chain(
diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs
index de474ee8fe4d0..f757bf4891fbc 100644
--- a/client/network/src/service/tests.rs
+++ b/client/network/src/service/tests.rs
@@ -92,21 +92,25 @@ fn build_test_full_node(
 let protocol_id = ProtocolId::from("/test-protocol-name");

+ let fork_id = Some(String::from("test-fork-id"));
+
 let block_request_protocol_config = {
- let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50);
+ let (handler, protocol_config) =
+ BlockRequestHandler::new(&protocol_id, None, client.clone(), 50);
 async_std::task::spawn(handler.run().boxed());
 protocol_config
 };

 let state_request_protocol_config = {
- let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50);
+ let (handler, protocol_config) =
+ StateRequestHandler::new(&protocol_id, None, client.clone(), 50);
 async_std::task::spawn(handler.run().boxed());
 protocol_config
 };

 let light_client_request_protocol_config = {
 let (handler, protocol_config) =
- LightClientRequestHandler::new(&protocol_id, client.clone());
+ LightClientRequestHandler::new(&protocol_id, None, client.clone());
 async_std::task::spawn(handler.run().boxed());
 protocol_config
 };
@@ -134,6 +138,7 @@ fn build_test_full_node(
 chain: client.clone(),
 transaction_pool: Arc::new(config::EmptyTransactionPool),
 protocol_id,
+ fork_id,
 import_queue,
 chain_sync: Box::new(chain_sync),
 metrics_registry: None,
diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs
index 043bdeff7ebfc..342b6a0430272 100644
--- a/client/network/src/transactions.rs
+++ b/client/network/src/transactions.rs
@@ -127,19 +127,34 @@ impl Future for PendingTransaction {
 /// Prototype for a [`TransactionsHandler`].
 pub struct TransactionsHandlerPrototype {
 protocol_name: Cow<'static, str>,
+ fallback_protocol_names: Vec>,
 }

 impl TransactionsHandlerPrototype {
 /// Create a new instance.
- pub fn new(protocol_id: ProtocolId) -> Self { - Self { protocol_name: format!("/{}/transactions/1", protocol_id.as_ref()).into() } + pub fn new>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option, + ) -> Self { + let protocol_name = if let Some(fork_id) = fork_id { + format!("/{}/{}/transactions/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/transactions/1", hex::encode(genesis_hash)) + }; + let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); + + Self { + protocol_name: protocol_name.into(), + fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), + } } /// Returns the configuration of the set to put in the network configuration. pub fn set_config(&self) -> config::NonDefaultSetConfig { config::NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), - fallback_names: Vec::new(), + fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, set_config: config::SetConfig { in_peers: 0, diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 3e3526146400a..7c8f8adafd214 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +hex = "0.4.0" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 30cbb32289d66..f4f10ac73c8d9 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -62,9 +62,15 @@ mod rep { } /// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(20), @@ -72,8 +78,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -/// Generate the block protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the block protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/sync/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/sync/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy block protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/sync/2", protocol_id.as_ref()) } @@ -129,6 +144,7 @@ where /// Create a new [`BlockRequestHandler`]. pub fn new( protocol_id: &ProtocolId, + fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -136,7 +152,15 @@ where // number of peers. 
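A recurring idiom throughout this patch is fetching the genesis hash from the client backend; spelled out once, assuming a `client: Arc<Client>` with a `BlockBackend` bound as in the handlers here:

    // `block_hash` returns `Result<Option<Hash>>`; block number 0 (genesis) is
    // guaranteed to exist in any initialized backend, hence the `expect`.
    let genesis_hash = client
        .block_hash(0u32.into())
        .ok()      // drop the backend error case
        .flatten() // collapse to Option<Hash>
        .expect("Genesis block exists; qed");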
let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config(protocol_id); + let mut protocol_config = generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); let seen_requests = LruCache::new(num_peer_hint * 2); diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 8e0bae14046da..6cf6482a44f8b 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -27,7 +27,7 @@ use libp2p::PeerId; use log::{debug, trace}; use lru::LruCache; use prost::Message; -use sc_client_api::ProofProvider; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -50,10 +50,16 @@ mod rep { pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); } -/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests. +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(40), @@ -61,8 +67,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -/// Generate the state protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the state protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/state/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/state/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy state protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/state/2", protocol_id.as_ref()) } @@ -104,11 +119,12 @@ pub struct StateRequestHandler { impl StateRequestHandler where B: BlockT, - Client: ProofProvider + Send + Sync + 'static, + Client: BlockBackend + ProofProvider + Send + Sync + 'static, { /// Create a new [`StateRequestHandler`]. pub fn new( protocol_id: &ProtocolId, + fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -116,7 +132,15 @@ where // number of peers. 
let (tx, request_receiver) = mpsc::channel(num_peer_hint);

- let mut protocol_config = generate_protocol_config(protocol_id);
+ let mut protocol_config = generate_protocol_config(
+ protocol_id,
+ client
+ .block_hash(0u32.into())
+ .ok()
+ .flatten()
+ .expect("Genesis block exists; qed"),
+ fork_id,
+ );
 protocol_config.inbound_queue = Some(tx);
 let seen_requests = LruCache::new(num_peer_hint * 2);
diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs
index 53ec216a1e668..394bc68449099 100644
--- a/client/network/sync/src/warp_request_handler.rs
+++ b/client/network/sync/src/warp_request_handler.rs
@@ -36,9 +36,15 @@ const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024;

 /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing
 /// incoming requests.
-pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig {
+pub fn generate_request_response_config>(
+ protocol_id: ProtocolId,
+ genesis_hash: Hash,
+ fork_id: Option<&str>,
+) -> RequestResponseConfig {
 RequestResponseConfig {
- name: generate_protocol_name(protocol_id).into(),
+ name: generate_protocol_name(genesis_hash, fork_id).into(),
+ fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
+ .collect(),
 max_request_size: 32,
 max_response_size: MAX_RESPONSE_SIZE,
 request_timeout: Duration::from_secs(10),
@@ -46,8 +52,17 @@ pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestRespo
 }

-/// Generate the grandpa warp sync protocol name from chain specific protocol identifier.
-fn generate_protocol_name(protocol_id: ProtocolId) -> String {
+/// Generate the grandpa warp sync protocol name from the genesis hash and fork id.
+fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
+ if let Some(fork_id) = fork_id {
+ format!("/{}/{}/sync/warp", hex::encode(genesis_hash), fork_id)
+ } else {
+ format!("/{}/sync/warp", hex::encode(genesis_hash))
+ }
+}
+
+/// Generate the legacy grandpa warp sync protocol name from chain specific protocol identifier.
+fn generate_legacy_protocol_name(protocol_id: ProtocolId) -> String {
 format!("/{}/sync/warp", protocol_id.as_ref())
 }
@@ -59,13 +74,16 @@ pub struct RequestHandler {
 impl RequestHandler {
 /// Create a new [`RequestHandler`].
- pub fn new( + pub fn new>( protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, backend: Arc>, ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); - let mut request_response_config = generate_request_response_config(protocol_id); + let mut request_response_config = + generate_request_response_config(protocol_id, genesis_hash, fork_id); request_response_config.inbound_queue = Some(tx); (Self { backend, request_receiver }, request_response_config) diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 4659684987f77..494ff2048f6c4 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -808,23 +808,25 @@ where let protocol_id = ProtocolId::from("test-protocol-name"); + let fork_id = Some(String::from("test-fork-id")); + let block_request_protocol_config = { let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, client.clone(), 50); + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, client.clone(), 50); + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); + LightClientRequestHandler::new(&protocol_id, None, client.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -832,8 +834,16 @@ where let warp_sync = Arc::new(TestWarpSyncProvider(client.clone())); let warp_protocol_config = { - let (handler, protocol_config) = - warp_request_handler::RequestHandler::new(protocol_id.clone(), warp_sync.clone()); + let (handler, protocol_config) = warp_request_handler::RequestHandler::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + None, + warp_sync.clone(), + ); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -867,6 +877,7 @@ where chain: client.clone(), transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, + fork_id, import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ec537a33b72d5..998ec12c1d05b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -741,6 +741,7 @@ where // Allow both outgoing and incoming requests. let (handler, protocol_config) = BlockRequestHandler::new( &protocol_id, + config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set.in_peers as usize + config.network.default_peers_set.out_peers as usize, @@ -753,6 +754,7 @@ where // Allow both outgoing and incoming requests. let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, + config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set_num_full as usize, ); @@ -763,8 +765,16 @@ where let (warp_sync_provider, warp_sync_protocol_config) = warp_sync .map(|provider| { // Allow both outgoing and incoming requests. 
- let (handler, protocol_config) =
- WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone());
+ let (handler, protocol_config) = WarpSyncRequestHandler::new(
+ protocol_id.clone(),
+ client
+ .block_hash(0u32.into())
+ .ok()
+ .flatten()
+ .expect("Genesis block exists; qed"),
+ config.chain_spec.fork_id(),
+ provider.clone(),
+ );
 spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run());
 (Some(provider), Some(protocol_config))
 })
@@ -772,8 +782,11 @@ where
 let light_client_request_protocol_config = {
 // Allow both outgoing and incoming requests.
- let (handler, protocol_config) =
- LightClientRequestHandler::new(&protocol_id, client.clone());
+ let (handler, protocol_config) = LightClientRequestHandler::new(
+ &protocol_id,
+ config.chain_spec.fork_id(),
+ client.clone(),
+ );
 spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run());
 protocol_config
 };
@@ -808,6 +821,7 @@ where
 chain: client.clone(),
 transaction_pool: transaction_pool_adapter as _,
 protocol_id,
+ fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned),
 import_queue: Box::new(import_queue),
 chain_sync: Box::new(chain_sync),
 metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),

From f64cd4fa987473ad1d05f409afe886d4a620782b Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 5 Aug 2022 12:48:05 +0300
Subject: [PATCH 107/135] `sync` protocol now can have negotiated fallback name (#11982)

---
 client/network/src/protocol.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs
index a06fff2322ce6..c51667017d15c 100644
--- a/client/network/src/protocol.rs
+++ b/client/network/src/protocol.rs
@@ -1577,8 +1577,6 @@ where
 } => {
 // Set number 0 is hardcoded the default set of peers we sync from.
 if set_id == HARDCODED_PEERSETS_SYNC {
- debug_assert!(negotiated_fallback.is_none());
-
 // `received_handshake` can be either a `Status` message if received from the
 // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block
 // announces substream.

From c172d0f683fab3792b90d876fd6ca27056af9fe9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Fri, 5 Aug 2022 19:53:56 +0200
Subject: [PATCH 108/135] system_syncState: Always return highest block (#11979)

Before, `highestBlock` was an optional value that was omitted when it was
`None`. We recently changed the way the `highestBlock` is determined; this
resulted in the value being `None` 99.99% of the time when the node is
syncing blocks at the tip.

Now we always return a block for `highestBlock`. If sync doesn't return us
any best seen block, we return our own local best block as `highestBlock`.

This should mainly reflect the same behavior as before we changed the way
the best seen block is determined.
---
 client/rpc-api/src/system/helpers.rs | 12 ++++++------
 client/rpc/src/system/tests.rs | 7 ++-----
 client/service/src/lib.rs | 6 ++++--
 3 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs
index 4561fccc1e81b..7ddb3f813c249 100644
--- a/client/rpc-api/src/system/helpers.rs
+++ b/client/rpc-api/src/system/helpers.rs
@@ -88,10 +88,10 @@ pub struct SyncState {
 pub starting_block: Number,
 /// Height of the current best block of the node.
 pub current_block: Number,
- /// Height of the highest block learned from the network. Missing if no block is known yet.
- #[serde(default = "Default::default", skip_serializing_if = "Option::is_none")]
- pub highest_block: Option,
+ /// Height of the highest block in the network.
+ pub highest_block: Number,
 }
+
 #[cfg(test)]
 mod tests {
 use super::*;
@@ -129,7 +129,7 @@ mod tests {
 ::serde_json::to_string(&SyncState {
 starting_block: 12u32,
 current_block: 50u32,
- highest_block: Some(128u32),
+ highest_block: 128u32,
 })
 .unwrap(),
 r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#,
@@ -139,10 +139,10 @@ mod tests {
 ::serde_json::to_string(&SyncState {
 starting_block: 12u32,
 current_block: 50u32,
- highest_block: None,
+ highest_block: 50u32,
 })
 .unwrap(),
- r#"{"startingBlock":12,"currentBlock":50}"#,
+ r#"{"startingBlock":12,"currentBlock":50,"highestBlock":50}"#,
 );
 }
 }
diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs
index 77acdf8418ccc..facad7a0b347a 100644
--- a/client/rpc/src/system/tests.rs
+++ b/client/rpc/src/system/tests.rs
@@ -123,7 +123,7 @@ fn api>>(sync: T) -> RpcModule> {
 let _ = sender.send(SyncState {
 starting_block: 1,
 current_block: 2,
- highest_block: Some(3),
+ highest_block: 3,
 });
 },
 };
@@ -297,10 +297,7 @@ async fn system_node_roles() {
 async fn system_sync_state() {
 let sync_state: SyncState =
 api(None).call("system_syncState", EmptyParams::new()).await.unwrap();
- assert_eq!(
- sync_state,
- SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) }
- );
+ assert_eq!(sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: 3 });
 }

 #[tokio::test]
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs
index 98bcb17174157..1de45e5527eec 100644
--- a/client/service/src/lib.rs
+++ b/client/service/src/lib.rs
@@ -264,10 +264,12 @@ async fn build_network_future<
 sc_rpc::system::Request::SyncState(sender) => {
 use sc_rpc::system::SyncState;

+ let best_number = client.info().best_number;
+
 let _ = sender.send(SyncState {
 starting_block,
- current_block: client.info().best_number,
- highest_block: network.best_seen_block(),
+ current_block: best_number,
+ highest_block: network.best_seen_block().unwrap_or(best_number),
 });
 }
 }

From 8574647f2a3df620ef32eddb050bfe2adb34aeab Mon Sep 17 00:00:00 2001
From: Nikos Kontakis
Date: Mon, 8 Aug 2022 11:31:26 +0200
Subject: [PATCH 109/135] Rename --pruning and --keep-blocks to be more similar to one another (#11934)

* rename pruning and keep-blocks flags

* Add aliases in keep-blocks and pruning for backward compatibility

* Rename in-code variables from `pruning` and `keep_blocks` to `state_pruning` and `blocks_pruning`
---
 bin/node/cli/benches/block_production.rs | 4 +-
 bin/node/cli/benches/transaction_pool.rs | 4 +-
 bin/node/testing/src/bench.rs | 2 +-
 client/cli/src/commands/chain_info_cmd.rs | 2 +-
 client/cli/src/config.rs | 12 +++---
 client/cli/src/params/pruning_params.rs | 22 +++++------
 client/db/src/lib.rs | 26 ++++++-------
 client/network/test/src/lib.rs | 8 ++--
 client/network/test/src/sync.rs | 2 +-
 client/service/src/builder.rs | 2 +-
 client/service/src/config.rs | 4 +-
 client/service/src/lib.rs | 2 +-
 client/service/test/src/client/mod.rs | 8 ++--
 client/service/test/src/lib.rs | 6 +--
 client/state-db/src/lib.rs | 46 +++++++++++------------
 test-utils/client/src/lib.rs | 8 ++--
 16 files changed, 79 insertions(+), 79 deletions(-)

diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs
index 6d269ccaac271..8420a65f8cb80 100644
--- a/bin/node/cli/benches/block_production.rs
+++ b/bin/node/cli/benches/block_production.rs
@@ -28,7 +28,7 @@ use sc_consensus::{
 };
 use
sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, @@ -75,7 +75,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { state_cache_size: 67108864, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index 580a10d6a6678..f031f9dae3d21 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -26,7 +26,7 @@ use node_primitives::AccountId; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, TransactionPoolOptions, WasmExecutionMethod, }, BasePath, Configuration, Role, @@ -69,7 +69,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { state_cache_size: 67108864, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 18e979b95737f..de64910125b86 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -392,7 +392,7 @@ impl BenchDb { state_cache_child_ratio: Some((0, 100)), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - keep_blocks: sc_client_db::KeepBlocks::All, + blocks_pruning: sc_client_db::BlocksPruning::All, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index 0e57d1677efbb..c65092290cf2d 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -77,7 +77,7 @@ impl ChainInfoCmd { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks.clone(), + blocks_pruning: config.blocks_pruning.clone(), }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 4ebbc8c72c19a..f513008b17adc 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -31,7 +31,7 @@ use sc_service::{ NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }, - ChainSpec, KeepBlocks, TracingReceiver, + BlocksPruning, ChainSpec, TracingReceiver, }; use sc_tracing::logging::LoggerBuilder; use std::{net::SocketAddr, path::PathBuf}; @@ -257,11 +257,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. 
/// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `KeepBlocks::All`. - fn keep_blocks(&self) -> Result { + /// `BlocksPruning::All`. + fn blocks_pruning(&self) -> Result { self.pruning_params() - .map(|x| x.keep_blocks()) - .unwrap_or_else(|| Ok(KeepBlocks::All)) + .map(|x| x.blocks_pruning()) + .unwrap_or_else(|| Ok(BlocksPruning::All)) } /// Get the chain ID (string). @@ -536,7 +536,7 @@ pub trait CliConfiguration: Sized { state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, state_pruning: self.state_pruning()?, - keep_blocks: self.keep_blocks()?, + blocks_pruning: self.blocks_pruning()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 9d471224cc59e..34a0982e63d95 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -18,7 +18,7 @@ use crate::error; use clap::Args; -use sc_service::{KeepBlocks, PruningMode}; +use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode #[derive(Debug, Clone, PartialEq, Args)] @@ -28,37 +28,37 @@ pub struct PruningParams { /// Default is to keep only the last 256 blocks, /// otherwise, the state can be kept for all of the blocks (i.e 'archive'), /// or for all of the canonical blocks (i.e 'archive-canonical'). - #[clap(long, value_name = "PRUNING_MODE")] - pub pruning: Option, + #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] + pub state_pruning: Option, /// Specify the number of finalized blocks to keep in the database. /// /// Default is to keep all blocks. /// /// NOTE: only finalized blocks are subject for removal! - #[clap(long, value_name = "COUNT")] - pub keep_blocks: Option, + #[clap(alias = "keep-blocks", long, value_name = "COUNT")] + pub blocks_pruning: Option, } impl PruningParams { /// Get the pruning value from the parameters pub fn state_pruning(&self) -> error::Result> { - self.pruning + self.state_pruning .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), bc => bc .parse() .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) - .map(PruningMode::keep_blocks), + .map(PruningMode::blocks_pruning), }) .transpose() } /// Get the block pruning value from the parameters - pub fn keep_blocks(&self) -> error::Result { - Ok(match self.keep_blocks { - Some(n) => KeepBlocks::Some(n), - None => KeepBlocks::All, + pub fn blocks_pruning(&self) -> error::Result { + Ok(match self.blocks_pruning { + Some(n) => BlocksPruning::Some(n), + None => BlocksPruning::All, }) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 0ce408a39b004..3214ee8f0d738 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -300,12 +300,12 @@ pub struct DatabaseSettings { /// Block pruning mode. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, } /// Block pruning settings. #[derive(Debug, Clone, Copy)] -pub enum KeepBlocks { +pub enum BlocksPruning { /// Keep full block history. All, /// Keep N recent finalized blocks. 
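Condensing what the renamed parameters accept after this change (a simplified sketch of the mapping implemented in `PruningParams` above; the `clap` aliases keep the old `--pruning`/`--keep-blocks` spellings working):

    fn parse_state_pruning(s: &str) -> Result<PruningMode, String> {
        match s {
            "archive" => Ok(PruningMode::ArchiveAll),
            n => n
                .parse::<u32>()
                .map(PruningMode::blocks_pruning)
                .map_err(|_| "Invalid pruning mode specified".to_string()),
        }
    }

    fn parse_blocks_pruning(n: Option<u32>) -> BlocksPruning {
        // Absent flag keeps the full block history.
        n.map_or(BlocksPruning::All, BlocksPruning::Some)
    }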
@@ -1012,7 +1012,7 @@ pub struct Backend { shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, - keep_blocks: KeepBlocks, + blocks_pruning: BlocksPruning, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, @@ -1043,21 +1043,21 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(keep_blocks, canonicalization_delay) + pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(keep_blocks: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(keep_blocks)), + state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), source: DatabaseSource::Custom { db, require_create_flag: true }, - keep_blocks: KeepBlocks::Some(keep_blocks), + blocks_pruning: BlocksPruning::Some(blocks_pruning), }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1124,7 +1124,7 @@ impl Backend { is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), }; @@ -1698,9 +1698,9 @@ impl Backend { finalized: NumberFor, displaced: &FinalizationDisplaced>, ) -> ClientResult<()> { - if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { // Always keep the last finalized block - let keep = std::cmp::max(keep_blocks, 1); + let keep = std::cmp::max(blocks_pruning, 1); if finalized >= keep.into() { let number = finalized.saturating_sub(keep.into()); self.prune_block(transaction, BlockId::::number(number))?; @@ -2465,9 +2465,9 @@ pub(crate) mod tests { DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(1)), + state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, }, 0, ) diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 494ff2048f6c4..09b4139c213a6 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -676,7 +676,7 @@ pub struct FullPeerConfig { /// Pruning window size. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: Option, + pub blocks_pruning: Option, /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. @@ -742,10 +742,10 @@ where /// Add a full peer. 
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), + let mut test_client_builder = match (config.blocks_pruning, config.storage_chain) { + (Some(blocks_pruning), true) => TestClientBuilder::with_tx_storage(blocks_pruning), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), + (Some(blocks_pruning), false) => TestClientBuilder::with_pruning_window(blocks_pruning), (None, false) => TestClientBuilder::with_default_backend(), }; if let Some(storage) = config.extra_storage { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 84a5c2ca13fa5..c0778767b75af 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -544,7 +544,7 @@ fn syncs_header_only_forks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); + net.add_full_peer_with_config(FullPeerConfig { blocks_pruning: Some(3), ..Default::default() }); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 998ec12c1d05b..55c0006de33fb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -207,7 +207,7 @@ where state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, }; let backend = new_db_backend(db_config)?; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0eeb6e05cee16..7860ff2281fe4 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -19,7 +19,7 @@ //! Service configuration. pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; -pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode}; +pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; @@ -79,7 +79,7 @@ pub struct Configuration { /// Number of blocks to keep in the db. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1de45e5527eec..5291d219e8102 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -60,7 +60,7 @@ pub use self::{ error::Error, }; pub use config::{ - BasePath, Configuration, DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods, TaskType, + BasePath, BlocksPruning, Configuration, DatabaseSource, PruningMode, Role, RpcMethods, TaskType, }; pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 136efad088fae..f363b621d5bcd 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -23,7 +23,7 @@ use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, }; -use sc_client_db::{Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode}; +use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; @@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() { state_cache_size: 1 << 20, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1426,8 +1426,8 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - state_pruning: Some(PruningMode::keep_blocks(1)), - keep_blocks: KeepBlocks::All, + state_pruning: Some(PruningMode::blocks_pruning(1)), + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 749c83c6eeac7..2d63362daffba 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -29,8 +29,8 @@ use sc_network::{ use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, - ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, - SpawnTaskHandle, TaskManager, + BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, + RuntimeGenesis, SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_api::BlockId; @@ -234,7 +234,7 @@ fn node_config< state_cache_size: 16777216, state_cache_child_ratio: None, state_pruning: Default::default(), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index d5cca9a342187..1c7140777e16e 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -227,7 +227,7 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. 
- pub fn keep_blocks(n: u32) -> PruningMode { + pub fn blocks_pruning(n: u32) -> PruningMode { PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } @@ -835,34 +835,34 @@ mod tests { #[test] fn pruning_mode_compatibility() { for (created, reopened, expected) in [ - (None, None, Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(256)), Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(128)), Ok(PruningMode::keep_blocks(128))), - (None, Some(PruningMode::keep_blocks(512)), Ok(PruningMode::keep_blocks(512))), + (None, None, Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(256)), Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(128)), Ok(PruningMode::blocks_pruning(128))), + (None, Some(PruningMode::blocks_pruning(512)), Ok(PruningMode::blocks_pruning(512))), (None, Some(PruningMode::ArchiveAll), Err(())), (None, Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::keep_blocks(256)), None, Ok(PruningMode::keep_blocks(256))), + (Some(PruningMode::blocks_pruning(256)), None, Ok(PruningMode::blocks_pruning(256))), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(256)), - Ok(PruningMode::keep_blocks(256)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(256)), + Ok(PruningMode::blocks_pruning(256)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(128)), - Ok(PruningMode::keep_blocks(128)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(128)), + Ok(PruningMode::blocks_pruning(128)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(512)), - Ok(PruningMode::keep_blocks(512)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(512)), + Ok(PruningMode::blocks_pruning(512)), ), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveAll), Err(())), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveAll), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(512)), Err(())), ( Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveAll), @@ -870,9 +870,9 @@ mod tests { ), (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveCanonical), 
Some(PruningMode::blocks_pruning(512)), Err(())), (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), ( Some(PruningMode::ArchiveCanonical), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 148f34246044d..3115be58425e6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -95,14 +95,14 @@ impl } /// Create new `TestClientBuilder` with default backend and pruning window size - pub fn with_pruning_window(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test(keep_blocks, 0)); + pub fn with_pruning_window(blocks_pruning: u32) -> Self { + let backend = Arc::new(Backend::new_test(blocks_pruning, 0)); Self::with_backend(backend) } /// Create new `TestClientBuilder` with default backend and storage chain mode - pub fn with_tx_storage(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage(keep_blocks, 0)); + pub fn with_tx_storage(blocks_pruning: u32) -> Self { + let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0)); Self::with_backend(backend) } } From 4ad36db74762629c65d5c1af1084f7e7683bc7a8 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 8 Aug 2022 17:46:57 +0800 Subject: [PATCH 110/135] Fix 16bit func_id (#11985) --- frame/contracts/src/chain_extension.rs | 4 ++-- frame/contracts/src/tests.rs | 29 +++++++++++++------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 23242a2a542c1..57101668f794d 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -38,7 +38,7 @@ //! //! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple. //! This is because the [`RegisteredChainExtension::ID`] is used to decide which of those extensions -//! should should be used when the contract calls a chain extensions. Extensions which are generally +//! should be used when the contract calls a chain extensions. Extensions which are generally //! useful should claim their `ID` with [the registry](https://github.com/paritytech/chainextension-registry) //! so that no collisions with other vendors will occur. //! @@ -215,7 +215,7 @@ where /// It returns the two least significant bytes of the `id` passed by a contract as the other /// two bytes represent the chain extension itself (the code which is calling this function). pub fn func_id(&self) -> u16 { - (self.inner.id & 0x00FF) as u16 + (self.inner.id & 0x0000FFFF) as u16 } /// The chain extension id within the `id` passed by a contract. 
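The one-line mask fix above is easy to misread, so here is a hedged sketch (editorial, not part of the patch) of how the combined 32-bit `id` decomposes. The old `0x00FF` mask kept only the low 8 bits, so any `func_id` of 256 or above, such as the `0x8000` values used in the tests below, was silently truncated:

// Mirrors the intent of `ext_id()`/`func_id()` in chain_extension.rs.
fn split_id(id: u32) -> (u16, u16) {
    let ext_id = (id >> 16) as u16; // high half: which registered extension
    let func_id = (id & 0x0000_FFFF) as u16; // low half: function within that extension
    (ext_id, func_id)
}

fn main() {
    assert_eq!(split_id(0x0001_8000), (1, 0x8000));
    // The bug: `id & 0x00FF` dropped the high byte of func_id entirely.
    assert_eq!((0x0001_8000_u32 & 0x00FF) as u16, 0);
}
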
diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 0febfec929b6e..30417d8544489 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -158,14 +158,14 @@ impl ChainExtension for TestExtension { let func_id = env.func_id(); let id = env.ext_id() as u32 | func_id as u32; match func_id { - 0 => { + 0x8000 => { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); Ok(RetVal::Converging(id)) }, - 1 => { + 0x8001 => { let env = env.only_in(); TEST_EXTENSION.with(|e| { e.borrow_mut().last_seen_inputs = @@ -173,13 +173,13 @@ impl ChainExtension for TestExtension { }); Ok(RetVal::Converging(id)) }, - 2 => { + 0x8002 => { let mut env = env.buf_in_buf_out(); let weight = env.read(5)?[4].into(); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, - 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), + 0x8003 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { panic!("Passed unknown id to test chain extension: {}", func_id); }, @@ -1646,21 +1646,22 @@ fn chain_extension_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - // 0 = read input buffer and pass it through as output - let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); + // 0x8000 = read input buffer and pass it through as output + let input: Vec = + ExtensionInput { extension_id: 0, func_id: 0x8000, extra: &[99] }.into(); let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, Bytes(input)); - // 1 = treat inputs as integer primitives and store the supplied integers + // 0x8001 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8001, extra: &[] }.into(), false, ) .result @@ -1668,14 +1669,14 @@ fn chain_extension_works() { // those values passed in the fixture assert_eq!(TestExtension::last_seen_inputs(), (4, 4, 16, 12)); - // 2 = charge some extra weight (amount supplied in the fifth byte) + // 0x8002 = charge some extra weight (amount supplied in the fifth byte) let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[0] }.into(), false, ); assert_ok!(result.result); @@ -1686,7 +1687,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[42] }.into(), false, ); assert_ok!(result.result); @@ -1697,20 +1698,20 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[95] }.into(), false, ); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 95); - // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + // 0x8003 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - 
ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8003, extra: &[] }.into(), false, ) .result From ef890f5598e0d3fd1b6a5fcc8c3322f7570089d1 Mon Sep 17 00:00:00 2001 From: yjh Date: Mon, 8 Aug 2022 17:49:00 +0800 Subject: [PATCH 111/135] chore: fix typos and sort trait impls (#11989) --- frame/contracts/src/wasm/code_cache.rs | 6 +++--- frame/contracts/src/wasm/env_def/mod.rs | 6 +++--- frame/contracts/src/wasm/prepare.rs | 2 +- frame/contracts/src/wasm/runtime.rs | 18 +++++++++--------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 10de436bfb155..61826c7c323aa 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -215,7 +215,7 @@ impl Token for CodeToken { use self::CodeToken::*; // In case of `Load` we already covered the general costs of // calling the storage but still need to account for the actual size of the - // contract code. This is why we substract `T::*::(0)`. We need to do this at this + // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. match *self { @@ -223,8 +223,8 @@ impl Token for CodeToken { Load(len) => { let computation = T::WeightInfo::call_with_code_per_byte(len) .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwith = T::ContractAccessWeight::get().saturating_mul(len.into()); - computation.max(bandwith) + let bandwidth = T::ContractAccessWeight::get().saturating_mul(len.into()); + computation.max(bandwidth) }, } } diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index b4c5ffe81e7c1..c904445f8aea8 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -31,8 +31,8 @@ pub trait ConvertibleToWasm: Sized { fn from_typed_value(_: Value) -> Option; } impl ConvertibleToWasm for i32 { - type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = i32; fn to_typed_value(self) -> Value { Value::I32(self) } @@ -41,8 +41,8 @@ impl ConvertibleToWasm for i32 { } } impl ConvertibleToWasm for u32 { - type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = u32; fn to_typed_value(self) -> Value { Value::I32(self as i32) } @@ -54,8 +54,8 @@ impl ConvertibleToWasm for u32 { } } impl ConvertibleToWasm for u64 { - type NativeType = u64; const VALUE_TYPE: ValueType = ValueType::I64; + type NativeType = u64; fn to_typed_value(self) -> Value { Value::I64(self as i64) } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index f6fff20de6b1a..aec5dc2db611d 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -540,7 +540,7 @@ mod tests { [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, - // new version of nop with other data type for argumebt + // new version of nop with other data type for argument [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, ); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 3d5e62f2c1333..562b29fbaeb1b 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1209,7 +1209,7 @@ define_env!(Env, , // // # Parameters // - // - flags: See [`CallFlags`] for a documenation of the 
supported flags. + // - flags: See [`CallFlags`] for a documentation of the supported flags. // - callee_ptr: a pointer to the address of the callee contract. // Should be decodable as an `T::AccountId`. Traps otherwise. // - gas: how much gas to devote to the execution. @@ -1753,7 +1753,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity + // backwards compatibility [seal0] seal_restore_to( ctx, _dest_ptr: u32, @@ -1774,7 +1774,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity + // backwards compatibility [seal1] seal_restore_to( ctx, _dest_ptr: u32, @@ -1857,7 +1857,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) @@ -1868,7 +1868,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) @@ -1879,7 +1879,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Balance)?; let rent_allowance = >::max_value().encode(); @@ -2085,15 +2085,15 @@ define_env!(Env, , // // # Return Value // - // Returns `ReturnCode::Success` when the dispatchable was succesfully executed and - // returned `Ok`. When the dispatchable was exeuted but returned an error + // Returns `ReturnCode::Success` when the dispatchable was successfully executed and + // returned `Ok`. When the dispatchable was executed but returned an error // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not // provided because it is not guaranteed to be stable. // // # Comparison with `ChainExtension` // // Just as a chain extension this API allows the runtime to extend the functionality - // of contracts. While making use of this function is generelly easier it cannot be + // of contracts. While making use of this function is generally easier it cannot be // used in call cases. 
Consider writing a chain extension if you need to do perform // one of the following tasks: // From 74a6370e805ebaf88b7939e496818531f762cadf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 8 Aug 2022 15:32:00 +0100 Subject: [PATCH 112/135] contracts: Apply depth limit when decoding (#11991) --- frame/contracts/src/wasm/runtime.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 562b29fbaeb1b..f21dc7c76e44c 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,7 +26,7 @@ use crate::{ }; use bitflags::bitflags; -use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use sp_core::{crypto::UncheckedFrom, Bytes}; @@ -36,6 +36,9 @@ use sp_sandbox::SandboxMemory; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::ValueType; +/// The maximum nesting depth a contract can use when encoding types. +const MAX_DECODE_NESTING: u32 = 256; + /// Type of a storage key. #[allow(dead_code)] enum KeyType { @@ -575,7 +578,7 @@ where ptr: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -597,7 +600,7 @@ where len: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, len)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } From 8e600ee0515122667cd091d632104708d343ca34 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 9 Aug 2022 20:04:28 +0900 Subject: [PATCH 113/135] Restore `wasmtime`'s default stack size limit to 1MB (#11993) * Restore `wasmtime`'s default stack size limit to 1MB * Add extra comments * Enforce different maximum call depth in release mode * Split the call depth limit in two --- client/executor/wasmtime/src/runtime.rs | 20 ++++--- client/executor/wasmtime/src/tests.rs | 79 +++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 7 deletions(-) diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 2feb9d0eea502..871de4e2300d2 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -324,13 +324,19 @@ fn common_config(semantics: &Semantics) -> std::result::Result native_stack_max, + + // In `wasmtime` 0.35 the default stack size limit was changed from 1MB to 512KB. + // + // This broke at least one parachain which depended on the original 1MB limit, + // so here we restore it to what it was originally. 
+ None => 1024 * 1024, + }; + + config + .max_wasm_stack(native_stack_max as usize) + .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {:#}", e)))?; config.parallel_compilation(semantics.parallel_compilation); diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index e0fd9fbce0c57..1ca5dde4c2b93 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -174,6 +174,85 @@ impl RuntimeBuilder { } } +fn deep_call_stack_wat(depth: usize) -> String { + format!( + r#" + (module + (memory $0 32) + (export "memory" (memory $0)) + (global (export "__heap_base") i32 (i32.const 0)) + (func (export "overflow") call 0) + + (func $overflow (param $0 i32) + (block $label$1 + (br_if $label$1 + (i32.ge_u + (local.get $0) + (i32.const {depth}) + ) + ) + (call $overflow + (i32.add + (local.get $0) + (i32.const 1) + ) + ) + ) + ) + + (func (export "main") + (param i32 i32) (result i64) + (call $overflow (i32.const 0)) + (i64.const 0) + ) + ) + "# + ) +} + +// These two tests ensure that the `wasmtime`'s stack size limit and the amount of +// stack space used by a single stack frame doesn't suddenly change without us noticing. +// +// If they do (e.g. because we've pulled in a new version of `wasmtime`) we want to know +// that it did, regardless of how small the change was. +// +// If these tests starting failing it doesn't necessarily mean that something is broken; +// what it means is that one (or multiple) of the following has to be done: +// a) the tests may need to be updated for the new call depth, +// b) the stack limit may need to be changed to maintain backwards compatibility, +// c) the root cause of the new call depth limit determined, and potentially fixed, +// d) the new call depth limit may need to be validated to ensure it doesn't prevent any +// existing chain from syncing (if it was effectively decreased) + +// We need two limits here since depending on whether the code is compiled in debug +// or in release mode the maximum call depth is slightly different. 
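// Editorial aside (not part of the patch): with the 1 MiB limit restored in
// runtime.rs above, the release-mode depth limit just below implies roughly
//     1024 * 1024 / 65514 ≈ 16
// bytes of native stack per recursive wasm call frame, which is why even a
// small codegen change in `wasmtime` shows up here as a shifted call depth.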
+const CALL_DEPTH_LOWER_LIMIT: usize = 65478; +const CALL_DEPTH_UPPER_LIMIT: usize = 65514; + +test_wasm_execution!(test_consume_under_1mb_of_stack_does_not_trap); +fn test_consume_under_1mb_of_stack_does_not_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_LOWER_LIMIT); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + instance.call_export("main", &[]).unwrap(); +} + +test_wasm_execution!(test_consume_over_1mb_of_stack_does_trap); +fn test_consume_over_1mb_of_stack_does_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_UPPER_LIMIT + 1); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + match instance.call_export("main", &[]).unwrap_err() { + Error::AbortedDueToTrap(error) => { + let expected = "wasm trap: call stack exhausted"; + assert_eq!(error.message, expected); + }, + error => panic!("unexpected error: {:?}", error), + } +} + test_wasm_execution!(test_nan_canonicalization); fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { let mut builder = RuntimeBuilder::new(instantiation_strategy).canonicalize_nans(true); From fece0657f20e15df94be5833b164dfacd44823eb Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 9 Aug 2022 21:28:32 +0300 Subject: [PATCH 114/135] Network sync refactoring (part 6) (#11940) * Extract `NetworkKVProvider` trait in `sc-authority-discovery` and remove unnecessary dependency * Extract `NetworkSyncForkRequest` trait in `sc-finality-grandpa` * Relax requirements on `SyncOracle` trait, remove extra native methods from `NetworkService` that are already provided by trait impls * Move `NetworkSigner` trait from `sc-authority-discovery` into `sc-network-common` and de-duplicate methods on `NetworkService` * Move `NetworkKVProvider` trait from `sc-authority-discovery` into `sc-network-common` and de-duplicate methods on `NetworkService` * Minimize `sc-authority-discovery` dependency on `sc-network` * Move `NetworkSyncForkRequest` trait from `sc-finality-grandpa` to `sc-network-common` and de-duplicate methods in `NetworkService` * Extract `NetworkStatusProvider` trait and de-duplicate methods on `NetworkService` * Extract `NetworkPeers` trait and de-duplicate methods on `NetworkService` * Extract `NetworkEventStream` trait and de-duplicate methods on `NetworkService` * Move more methods from `NetworkService` into `NetworkPeers` trait * Move `NetworkStateInfo` trait into `sc-network-common` * Extract `NetworkNotification` trait and de-duplicate methods on `NetworkService` * Extract `NetworkRequest` trait and de-duplicate methods on `NetworkService` * Remove `NetworkService::local_peer_id()`, it is already provided by `NetworkStateInfo` impl * Extract `NetworkTransaction` trait and de-duplicate methods on `NetworkService` * Extract `NetworkBlock` trait and de-duplicate methods on `NetworkService` * Remove dependencies on `NetworkService` from most of the methods of `sc-service` * Address simple review comments --- Cargo.lock | 11 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 3 +- client/authority-discovery/Cargo.toml | 2 +- client/authority-discovery/src/lib.rs | 3 +- client/authority-discovery/src/service.rs | 2 +- 
client/authority-discovery/src/tests.rs | 4 +- client/authority-discovery/src/worker.rs | 76 +- .../src/worker/addr_cache.rs | 7 +- .../src/worker/schema/tests.rs | 5 +- .../authority-discovery/src/worker/tests.rs | 63 +- client/consensus/pow/src/lib.rs | 2 +- client/consensus/slots/src/lib.rs | 2 +- .../src/communication/gossip.rs | 3 +- .../finality-grandpa/src/communication/mod.rs | 45 +- .../src/communication/tests.rs | 143 +++- client/informant/Cargo.toml | 2 +- client/informant/src/display.rs | 8 +- client/informant/src/lib.rs | 7 +- client/network-gossip/Cargo.toml | 1 + client/network-gossip/src/bridge.rs | 138 ++- client/network-gossip/src/lib.rs | 79 +- client/network-gossip/src/state_machine.rs | 126 ++- client/network-gossip/src/validator.rs | 3 +- client/network/common/Cargo.toml | 3 + client/network/common/src/lib.rs | 2 + client/network/common/src/protocol.rs | 19 + .../{ => common}/src/protocol/event.rs | 4 +- .../network/common/src/request_responses.rs | 39 +- client/network/common/src/service.rs | 660 +++++++++++++++ .../{ => common}/src/service/signature.rs | 5 +- client/network/src/behaviour.rs | 12 +- client/network/src/lib.rs | 58 +- client/network/src/protocol.rs | 3 +- client/network/src/request_responses.rs | 41 +- client/network/src/service.rs | 794 +++++++----------- client/network/src/service/out_events.rs | 3 +- client/network/src/service/tests.rs | 59 +- client/network/src/transactions.rs | 10 +- client/network/test/src/lib.rs | 16 +- client/offchain/Cargo.toml | 1 + client/offchain/src/api.rs | 79 +- client/offchain/src/lib.rs | 104 ++- client/service/src/builder.rs | 58 +- client/service/src/lib.rs | 2 + client/service/src/metrics.rs | 8 +- client/service/test/Cargo.toml | 1 + client/service/test/src/lib.rs | 9 +- primitives/consensus/common/src/lib.rs | 18 +- 49 files changed, 1802 insertions(+), 942 deletions(-) create mode 100644 client/network/common/src/protocol.rs rename client/network/{ => common}/src/protocol/event.rs (96%) create mode 100644 client/network/common/src/service.rs rename client/network/{ => common}/src/service/signature.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 703eeffce54a1..38063e8060835 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4686,6 +4686,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-rpc", "sc-service", "sc-service-test", @@ -7702,7 +7703,6 @@ dependencies = [ name = "sc-authority-discovery" version = "0.10.0-dev" dependencies = [ - "async-trait", "futures", "futures-timer", "ip_network", @@ -7715,6 +7715,7 @@ dependencies = [ "rand 0.7.3", "sc-client-api", "sc-network", + "sc-network-common", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -8316,7 +8317,7 @@ dependencies = [ "log", "parity-util-mem", "sc-client-api", - "sc-network", + "sc-network-common", "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", @@ -8398,7 +8399,9 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ + "async-trait", "bitflags", + "bytes", "futures", "libp2p", "parity-scale-codec", @@ -8409,6 +8412,7 @@ dependencies = [ "sp-consensus", "sp-finality-grandpa", "sp-runtime", + "thiserror", ] [[package]] @@ -8424,6 +8428,7 @@ dependencies = [ "lru", "quickcheck", "sc-network", + "sc-network-common", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -8533,6 +8538,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-network", + "sc-network-common", "sc-transaction-pool", "sc-transaction-pool-api", 
"sc-utils", @@ -8741,6 +8747,7 @@ dependencies = [ "sc-consensus", "sc-executor", "sc-network", + "sc-network-common", "sc-service", "sc-transaction-pool-api", "sp-api", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index fe7c5332e2b45..89ca3ebb45576 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -66,6 +66,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } +sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e0644f462cf20..b20a3ac59a96a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,7 +29,8 @@ use node_primitives::Block; use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::{Event, NetworkService}; +use sc_network::NetworkService; +use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 012e6096aab80..544df4c8d4812 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -async-trait = "0.1" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } futures = "0.3.21" futures-timer = "3.0.1" @@ -29,6 +28,7 @@ rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 8522da9984a6f..f0ef374551617 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -39,8 +39,9 @@ use futures::{ Stream, }; +use libp2p::{Multiaddr, PeerId}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sc_network_common::protocol::event::DhtEvent; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index c240e5d0c2287..df09b6ea43216 100644 --- a/client/authority-discovery/src/service.rs +++ 
b/client/authority-discovery/src/service.rs @@ -25,7 +25,7 @@ use futures::{ SinkExt, }; -use sc_network::{Multiaddr, PeerId}; +use libp2p::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; /// Service to interact with the [`crate::Worker`]. diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index e9f94b6a186db..334b2638ca58c 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -87,12 +87,12 @@ fn get_addresses_and_authority_id() { fn cryptos_are_compatible() { use sp_core::crypto::Pair; - let libp2p_secret = sc_network::Keypair::generate_ed25519(); + let libp2p_secret = libp2p::identity::Keypair::generate_ed25519(); let libp2p_public = libp2p_secret.public(); let sp_core_secret = { let libp2p_ed_secret = match libp2p_secret.clone() { - sc_network::Keypair::Ed25519(x) => x, + libp2p::identity::Keypair::Ed25519(x) => x, _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), }; sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 87cc72ba7a69c..f13a1cf33581b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -32,19 +32,22 @@ use std::{ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; -use async_trait::async_trait; use codec::Decode; use ip_network::IpNetwork; use libp2p::{ core::multiaddr, multihash::{Multihash, MultihashDigest}, + Multiaddr, PeerId, }; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sc_network_common::{ + protocol::event::DhtEvent, + service::{KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature}, +}; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, @@ -136,7 +139,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, /// Set of in-flight lookups. - in_flight_lookups: HashMap, + in_flight_lookups: HashMap, addr_cache: addr_cache::AddrCache, @@ -464,10 +467,7 @@ where } } - fn handle_dht_value_found_event( - &mut self, - values: Vec<(sc_network::KademliaKey, Vec)>, - ) -> Result<()> { + fn handle_dht_value_found_event(&mut self, values: Vec<(KademliaKey, Vec)>) -> Result<()> { // Ensure `values` is not empty and all its keys equal. let remote_key = single(values.iter().map(|(key, _)| key.clone())) .map_err(|_| Error::ReceivingDhtValueFoundEventWithDifferentKeys)? 
@@ -523,11 +523,11 @@ where // properly signed by the owner of the PeerId if let Some(peer_signature) = peer_signature { - let public_key = - sc_network::PublicKey::from_protobuf_encoding(&peer_signature.public_key) - .map_err(Error::ParsingLibp2pIdentity)?; - let signature = - sc_network::Signature { public_key, bytes: peer_signature.signature }; + let public_key = libp2p::identity::PublicKey::from_protobuf_encoding( + &peer_signature.public_key, + ) + .map_err(Error::ParsingLibp2pIdentity)?; + let signature = Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { return Err(Error::VerifyingDhtPayload) @@ -590,55 +590,15 @@ where } } -pub trait NetworkSigner { - /// Sign a message in the name of `self.local_peer_id()` - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result; -} - /// NetworkProvider provides [`Worker`] with all necessary hooks into the /// underlying Substrate networking. Using this trait abstraction instead of -/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. -#[async_trait] -pub trait NetworkProvider: NetworkStateInfo + NetworkSigner { - /// Start putting a value in the Dht. - fn put_value(&self, key: sc_network::KademliaKey, value: Vec); - - /// Start getting a value from the Dht. - fn get_value(&self, key: &sc_network::KademliaKey); -} +/// `sc_network::NetworkService` directly is necessary to unit test [`Worker`]. +pub trait NetworkProvider: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -impl NetworkSigner for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result { - self.sign_with_local_identity(msg) - } -} - -#[async_trait::async_trait] -impl NetworkProvider for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { - self.put_value(key, value) - } - fn get_value(&self, key: &sc_network::KademliaKey) { - self.get_value(key) - } -} +impl NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -fn hash_authority_id(id: &[u8]) -> sc_network::KademliaKey { - sc_network::KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) +fn hash_authority_id(id: &[u8]) -> KademliaKey { + KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) } // Makes sure all values are the same and returns it @@ -685,7 +645,7 @@ async fn sign_record_with_authority_ids( peer_signature: Option, key_store: &dyn CryptoStore, keys: Vec, -) -> Result)>> { +) -> Result)>> { let signatures = key_store .sign_with_all(key_types::AUTHORITY_DISCOVERY, keys.clone(), &serialized_record) .await diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index f768b9c4e66a7..19bbbf0b62e7e 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -16,9 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use libp2p::core::multiaddr::{Multiaddr, Protocol}; - -use sc_network::PeerId; +use libp2p::{ + core::multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; diff --git a/client/authority-discovery/src/worker/schema/tests.rs b/client/authority-discovery/src/worker/schema/tests.rs index b85a4ce37447d..60147d6762e50 100644 --- a/client/authority-discovery/src/worker/schema/tests.rs +++ b/client/authority-discovery/src/worker/schema/tests.rs @@ -21,9 +21,8 @@ mod schema_v1 { } use super::*; -use libp2p::multiaddr::Multiaddr; +use libp2p::{multiaddr::Multiaddr, PeerId}; use prost::Message; -use sc_network::PeerId; #[test] fn v2_decodes_v1() { @@ -56,7 +55,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = sc_network::Keypair::generate_ed25519(); + let peer_secret = libp2p::identity::Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index a1a699bc30dd2..5a60d3353db52 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -22,7 +22,6 @@ use std::{ task::Poll, }; -use async_trait::async_trait; use futures::{ channel::mpsc::{self, channel}, executor::{block_on, LocalPool}, @@ -30,9 +29,10 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, PeerId}; +use libp2p::{core::multiaddr, identity::Keypair, PeerId}; use prometheus_endpoint::prometheus::default_registry; +use sc_network_common::service::{KademliaKey, Signature, SigningError}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::KeyStore, CryptoStore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -111,18 +111,18 @@ sp_api::mock_impl_runtime_apis! { #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(sc_network::KademliaKey), - PutCalled(sc_network::KademliaKey, Vec), + GetCalled(KademliaKey), + PutCalled(KademliaKey, Vec), } pub struct TestNetwork { peer_id: PeerId, - identity: sc_network::Keypair, + identity: Keypair, external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. 
- pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -136,7 +136,7 @@ impl TestNetwork { impl Default for TestNetwork { fn default() -> Self { let (tx, rx) = mpsc::unbounded(); - let identity = sc_network::Keypair::generate_ed25519(); + let identity = Keypair::generate_ed25519(); TestNetwork { peer_id: identity.public().to_peer_id(), identity, @@ -153,21 +153,20 @@ impl NetworkSigner for TestNetwork { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, &self.identity) + ) -> std::result::Result { + Signature::sign_message(msg, &self.identity) } } -#[async_trait] -impl NetworkProvider for TestNetwork { - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { +impl NetworkDHTProvider for TestNetwork { + fn put_value(&self, key: KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); self.event_sender .clone() .unbounded_send(TestNetworkEvent::PutCalled(key, value)) .unwrap(); } - fn get_value(&self, key: &sc_network::KademliaKey) { + fn get_value(&self, key: &KademliaKey) { self.get_value_call.lock().unwrap().push(key.clone()); self.event_sender .clone() @@ -186,12 +185,16 @@ impl NetworkStateInfo for TestNetwork { } } -impl NetworkSigner for sc_network::Keypair { +struct TestSigner<'a> { + keypair: &'a Keypair, +} + +impl<'a> NetworkSigner for TestSigner<'a> { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, self) + ) -> std::result::Result { + Signature::sign_message(msg, self.keypair) } } @@ -200,7 +203,7 @@ async fn build_dht_event( public_key: AuthorityId, key_store: &dyn CryptoStore, network: Option<&Signer>, -) -> Vec<(sc_network::KademliaKey, Vec)> { +) -> Vec<(KademliaKey, Vec)> { let serialized_record = serialize_authority_record(serialize_addresses(addresses.into_iter())).unwrap(); @@ -313,7 +316,7 @@ fn publish_discover_cycle() { let dht_event = { let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) + DhtEvent::ValueFound(vec![(key, value)]) }; // Node B discovering node A's address. 
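Most of the churn in these tests is an import migration that downstream users of the DHT types will repeat. A hedged before/after sketch (crate paths as introduced by this patch; not exhaustive):

// Before (sketch): the types were re-exported from sc-network:
// use sc_network::{DhtEvent, KademliaKey, Keypair, Multiaddr, PeerId};

// After (sketch): identity and address types come straight from libp2p, the
// event and service abstractions from sc-network-common:
use libp2p::{identity::Keypair, Multiaddr, PeerId};
use sc_network_common::{
    protocol::event::DhtEvent,
    service::{KademliaKey, Signature, SigningError},
};
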
@@ -469,7 +472,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -487,7 +490,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { struct DhtValueFoundTester { pub remote_key_store: KeyStore, pub remote_authority_public: sp_core::sr25519::Public, - pub remote_node_key: sc_network::Keypair, + pub remote_node_key: Keypair, pub local_worker: Option< Worker< TestApi, @@ -496,7 +499,7 @@ struct DhtValueFoundTester { sp_runtime::generic::Header, substrate_test_runtime_client::runtime::Extrinsic, >, - std::pin::Pin>>, + std::pin::Pin>>, >, >, } @@ -508,7 +511,7 @@ impl DhtValueFoundTester { block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) .unwrap(); - let remote_node_key = sc_network::Keypair::generate_ed25519(); + let remote_node_key = Keypair::generate_ed25519(); Self { remote_key_store, remote_authority_public, remote_node_key, local_worker: None } } @@ -523,7 +526,7 @@ impl DhtValueFoundTester { fn process_value_found( &mut self, strict_record_validation: bool, - values: Vec<(sc_network::KademliaKey, Vec)>, + values: Vec<(KademliaKey, Vec)>, ) -> Option<&HashSet> { let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = @@ -583,7 +586,7 @@ fn strict_accept_address_with_peer_signature() { vec![addr.clone()], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -598,12 +601,12 @@ fn strict_accept_address_with_peer_signature() { #[test] fn reject_address_with_rogue_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let rogue_remote_node_key = sc_network::Keypair::generate_ed25519(); + let rogue_remote_node_key = Keypair::generate_ed25519(); let kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&rogue_remote_node_key), + Some(&TestSigner { keypair: &rogue_remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -621,7 +624,7 @@ fn reject_address_with_invalid_peer_signature() { vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); // tamper with the signature let mut record = schema::SignedAuthorityRecord::decode(kv_pairs[0].1.as_slice()).unwrap(); @@ -808,7 +811,7 @@ fn lookup_throttling() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -822,7 +825,7 @@ fn lookup_throttling() { // Make second one fail. let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + let dht_event = DhtEvent::ValueNotFound(remote_hash); dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. 
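One design choice worth noting from `worker.rs` above: `NetworkProvider` is now an empty marker trait with a blanket impl over `NetworkDHTProvider + NetworkStateInfo + NetworkSigner`, so any type implementing those three `sc-network-common` traits (such as the `TestNetwork` double in these tests) is accepted by the worker with no extra code. A minimal hedged sketch of consuming the capabilities generically (the helper name is illustrative):

use sc_network_common::service::{KademliaKey, NetworkDHTProvider, NetworkStateInfo};

// Any NetworkProvider offers both of these capabilities via its supertraits.
fn start_lookup<N: NetworkDHTProvider + NetworkStateInfo>(network: &N, key: &KademliaKey) {
    let _local_peer = network.local_peer_id(); // from NetworkStateInfo
    network.get_value(key); // from NetworkDHTProvider
}
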
diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 6f9ee6f864ad8..f63e453a48026 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -518,7 +518,7 @@ pub fn start_mining_worker( select_chain: S, algorithm: Algorithm, mut env: E, - mut sync_oracle: SO, + sync_oracle: SO, justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7c3de13444c1a..39b40a32f18ca 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -490,7 +490,7 @@ pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, - mut sync_oracle: SO, + sync_oracle: SO, create_inherent_data_providers: CIDP, can_author_with: CAW, ) where diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 65d7dfb783aa3..9e9f2fb98b0d1 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -89,7 +89,8 @@ use log::{debug, trace}; use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; -use sc_network::{ObservedRole, PeerId, ReputationChange}; +use sc_network::{PeerId, ReputationChange}; +use sc_network_common::protocol::event::ObservedRole; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 378501cffdd62..7a47ebe214950 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -45,7 +45,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, Encode}; -use sc_network::{NetworkService, ReputationChange}; +use sc_network::ReputationChange; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::SyncCryptoStorePtr; @@ -58,6 +58,7 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; +use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; @@ -156,34 +157,26 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// /// Something that provides both the capabilities needed for the `gossip_network::Network` trait as /// well as the ability to set a fork sync request for a particular block. -pub trait Network: GossipNetwork + Clone + Send + 'static { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). 
- fn set_sync_fork_request( - &self, - peers: Vec, - hash: Block::Hash, - number: NumberFor, - ); +pub trait Network: + NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static +{ } -impl Network for Arc> +impl Network for T where - B: BlockT, - H: sc_network::ExHashT, + Block: BlockT, + T: NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static, { - fn set_sync_fork_request( - &self, - peers: Vec, - hash: B::Hash, - number: NumberFor, - ) { - NetworkService::set_sync_fork_request(self, peers, hash, number) - } } /// Create a unique topic for a round and set-id combo. @@ -467,7 +460,7 @@ impl> NetworkBridge { hash: B::Hash, number: NumberFor, ) { - Network::set_sync_fork_request(&self.service, peers, hash, number) + self.service.set_sync_fork_request(peers, hash, number) } } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 0ec5092a2a047..59935cef6a095 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -25,7 +25,14 @@ use super::{ use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; use parity_scale_codec::Encode; -use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; +use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; +use sc_network_common::{ + protocol::event::{Event as NetworkEvent, ObservedRole}, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NetworkSyncForkRequest, NotificationSender, NotificationSenderError, + }, +}; use sc_network_gossip::Validator; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -34,6 +41,7 @@ use sp_keyring::Ed25519Keyring; use sp_runtime::traits::NumberFor; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -42,8 +50,8 @@ use std::{ #[derive(Debug)] pub(crate) enum Event { EventStream(TracingUnboundedSender), - WriteNotification(sc_network::PeerId, Vec), - Report(sc_network::PeerId, sc_network::ReputationChange), + WriteNotification(PeerId, Vec), + Report(PeerId, ReputationChange), Announce(Hash), } @@ -52,49 +60,122 @@ pub(crate) struct TestNetwork { sender: TracingUnboundedSender, } -impl sc_network_gossip::Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) +impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); } - fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {} + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn 
remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set(&self, _protocol: Cow<'static, str>, _peers: Vec) {} - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { - let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); + fn sync_num_connected(&self) -> usize { + unimplemented!(); } +} - fn announce(&self, block: Hash, _associated_data: Option>) { - let _ = self.sender.unbounded_send(Event::Announce(block)); +impl NetworkEventStream for TestNetwork { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test"); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + Box::pin(rx) } } -impl super::Network for TestNetwork { - fn set_sync_fork_request( +impl NetworkNotification for TestNetwork { + fn write_notification(&self, target: PeerId, _protocol: Cow<'static, str>, message: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); + } + + fn notification_sender( &self, - _peers: Vec, - _hash: Hash, - _number: NumberFor, - ) { + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } +} + +impl NetworkBlock> for TestNetwork { + fn announce_block(&self, hash: Hash, _data: Option>) { + let _ = self.sender.unbounded_send(Event::Announce(hash)); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); } } +impl NetworkSyncForkRequest> for TestNetwork { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + impl sc_network_gossip::ValidatorContext for TestNetwork { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { - >::write_notification( + fn send_message(&mut self, who: &PeerId, data: Vec) { + ::write_notification( self, who.clone(), grandpa_protocol_name::NAME.into(), @@ -102,7 +183,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { ); } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -207,8 +288,8 @@ struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_message(&mut self, _: &PeerId, _: Vec) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } #[test] @@ -252,7 +333,7 @@ fn good_commit_leads_to_relay() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = 
PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -301,7 +382,7 @@ fn good_commit_leads_to_relay() { }); // Add a random peer which will be the recipient of this message - let receiver_id = sc_network::PeerId::random(); + let receiver_id = PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), protocol: grandpa_protocol_name::NAME.into(), @@ -403,7 +484,7 @@ fn bad_commit_leads_to_report() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -484,7 +565,7 @@ fn bad_commit_leads_to_report() { #[test] fn peer_with_higher_view_leads_to_catch_up_request() { - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let (tester, mut net) = make_test_network(); let test = tester diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 528365d62c18b..b3ac5d892fd58 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -19,7 +19,7 @@ futures-timer = "3.0.1" log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 446ddf47b4cab..0441011b0ec42 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -20,7 +20,13 @@ use crate::OutputFormat; use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; -use sc_network::{NetworkStatus, SyncState, WarpSyncPhase, WarpSyncProgress}; +use sc_network_common::{ + service::NetworkStatus, + sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + SyncState, + }, +}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{fmt, time::Instant}; diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 5dca77f1a7433..52f1c95fe0198 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network::NetworkService; +use sc_network_common::service::NetworkStatusProvider; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; @@ -53,12 +53,13 @@ impl Default for OutputFormat { } /// Builds the informant and returns a `Future` that drives the informant. -pub async fn build( +pub async fn build( client: Arc, - network: Arc::Hash>>, + network: N, pool: Arc
<P>
, format: OutputFormat, ) where + N: NetworkStatusProvider, C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, P: TransactionPool + MallocSizeOf, diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 144c5ad168996..185c37985b585 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -23,6 +23,7 @@ lru = "0.7.5" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 2d086e89b4a10..8a6c3358e4409 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,7 +21,8 @@ use crate::{ Network, Validator, }; -use sc_network::{Event, ReputationChange}; +use sc_network::ReputationChange; +use sc_network_common::protocol::event::Event; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -85,7 +86,7 @@ impl GossipEngine { B: 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream(); + let network_event_stream = network.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), @@ -162,7 +163,7 @@ impl GossipEngine { /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. pub fn announce(&self, block: B::Hash, associated_data: Option>) { - self.network.announce(block, associated_data); + self.network.announce_block(block, associated_data); } } @@ -181,7 +182,10 @@ impl Future for GossipEngine { this.network.add_set_reserved(remote, this.protocol.clone()); }, Event::SyncDisconnected { remote } => { - this.network.remove_set_reserved(remote, this.protocol.clone()); + this.network.remove_peers_from_reserved_set( + this.protocol.clone(), + vec![remote], + ); }, Event::NotificationStreamOpened { remote, protocol, role, .. 
} => { if protocol != this.protocol { @@ -304,7 +308,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{ValidationResult, ValidatorContext}; + use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; use async_std::task::spawn; use futures::{ channel::mpsc::{unbounded, UnboundedSender}, @@ -312,10 +316,20 @@ mod tests { future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use sc_network::ObservedRole; - use sp_runtime::{testing::H256, traits::Block as BlockT}; + use sc_network_common::{ + protocol::event::ObservedRole, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sp_runtime::{ + testing::H256, + traits::{Block as BlockT, NumberFor}, + }; use std::{ borrow::Cow, + collections::HashSet, sync::{Arc, Mutex}, }; use substrate_test_runtime_client::runtime::Block; @@ -330,29 +344,119 @@ mod tests { event_senders: Vec>, } - impl Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } + + impl NetworkEventStream for TestNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { let (tx, rx) = unbounded(); self.inner.lock().unwrap().event_senders.push(tx); Box::pin(rx) } + } - fn report_peer(&self, _: PeerId, _: ReputationChange) {} - - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + impl NetworkNotification for TestNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn notification_sender( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkBlock<::Hash, NumberFor> for TestNetwork { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { 
unimplemented!(); } - fn announce(&self, _: B::Hash, _: Option>) { + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 4b83708702466..b9dff0bcd4d00 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -41,9 +41,9 @@ //! messages. //! //! The [`GossipEngine`] will automatically use [`Network::add_set_reserved`] and -//! [`Network::remove_set_reserved`] to maintain a set of peers equal to the set of peers the -//! node is syncing from. See the documentation of `sc-network` for more explanations about the -//! concepts of peer sets. +//! [`NetworkPeers::remove_peers_from_reserved_set`] to maintain a set of peers equal to the set of +//! peers the node is syncing from. See the documentation of `sc-network` for more explanations +//! about the concepts of peer sets. //! //! # What is a validator? //! @@ -67,74 +67,35 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use futures::prelude::*; -use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::traits::Block as BlockT; -use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; +use sc_network::{multiaddr, PeerId}; +use sc_network_common::service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, iter}; mod bridge; mod state_machine; mod validator; /// Abstraction over a network. -pub trait Network { - /// Returns a stream of events representing what happens on the network. - fn event_stream(&self) -> Pin + Send>>; - - /// Adjust the reputation of a node. - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); - - /// Adds the peer to the set of peers to be connected to with this protocol. - fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Removes the peer from the set of peers to be connected to with this protocol. - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); - - /// Notify everyone we're connected to that we have the given block. - /// - /// Note: this method isn't strictly related to gossiping and should eventually be moved - /// somewhere else. 
- fn announce(&self, block: B::Hash, associated_data: Option>); -} - -impl Network for Arc> { - fn event_stream(&self) -> Pin + Send>> { - Box::pin(NetworkService::event_stream(self, "network-gossip")) - } - - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { - NetworkService::report_peer(self, peer_id, reputation); - } - +pub trait Network: + NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> +{ fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); - let result = - NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } +} - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(who).collect()); - } - - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::disconnect_peer(self, who, protocol) - } - - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { - NetworkService::write_notification(self, who, protocol, message) - } - - fn announce(&self, block: B::Hash, associated_data: Option>) { - NetworkService::announce_block(self, block, associated_data) - } +impl Network for T where + T: NetworkPeers + + NetworkEventStream + + NetworkNotification + + NetworkBlock> +{ } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 8a016cbaab3da..4cc4e25529af4 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,7 +22,7 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network::ObservedRole; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use std::{borrow::Cow, collections::HashMap, iter, sync::Arc, time, time::Instant}; @@ -511,11 +511,23 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; + use crate::multiaddr::Multiaddr; use futures::prelude::*; - use sc_network::{Event, ReputationChange}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use sc_network::ReputationChange; + use sc_network_common::{ + protocol::event::Event, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + traits::NumberFor, + }; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::{Arc, Mutex}, }; @@ -569,28 +581,118 @@ mod tests { peer_reports: Vec<(PeerId, ReputationChange)>, } - impl Network for NoOpNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for NoOpNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { 
+ unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn report_peer(&self, peer_id: PeerId, reputation_change: ReputationChange) { - self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkEventStream for NoOpNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { unimplemented!(); } + } - fn announce(&self, _: B::Hash, _: Option>) { + impl NetworkNotification for NoOpNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { + unimplemented!(); + } + + fn notification_sender( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } + + impl NetworkBlock<::Hash, NumberFor> for NoOpNetwork { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { + unimplemented!(); + } + + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 7d60f7b31397f..a98c62ab5a9eb 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -16,7 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{ObservedRole, PeerId}; +use sc_network::PeerId; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. 
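Taken together, the grandpa and network-gossip changes in this patch follow one pattern: the monolithic `Network` traits become empty supertraits over the narrow capability traits in `sc-network-common` (`NetworkPeers`, `NetworkEventStream`, `NetworkNotification`, `NetworkBlock`, `NetworkSyncForkRequest`), with a blanket impl so any type providing those capabilities qualifies automatically. A condensed sketch of the pattern, using simplified stand-in traits rather than sc-network's real ones:

```rust
use std::collections::HashSet;

// Narrow capability traits, analogous to `NetworkPeers` / `NetworkNotification`.
trait Peers {
    fn report_peer(&self, peer: &str, change: i32);
    fn connected(&self) -> HashSet<String>;
}

trait Notifications {
    fn write_notification(&self, peer: &str, message: Vec<u8>);
}

// The combined trait carries no methods of its own...
trait Network: Peers + Notifications {}

// ...and the blanket impl means no one ever implements it by hand.
impl<T> Network for T where T: Peers + Notifications {}

struct MockNet;

impl Peers for MockNet {
    fn report_peer(&self, peer: &str, change: i32) {
        println!("report {peer}: {change}");
    }
    fn connected(&self) -> HashSet<String> {
        HashSet::new()
    }
}

impl Notifications for MockNet {
    fn write_notification(&self, peer: &str, message: Vec<u8>) {
        println!("notify {peer}: {} bytes", message.len());
    }
}

// Consumers bound on the combined trait get every capability at once.
fn gossip<N: Network>(net: &N) {
    net.write_notification("peer-a", b"hello".to_vec());
    net.report_peer("peer-a", 10);
}

fn main() {
    // `MockNet` implements the pieces, so the blanket impl makes it a `Network`.
    gossip(&MockNet);
}
```

The payoff is visible in the accompanying `Cargo.toml` hunks: consumers such as `sc-network-gossip` and `sc-informant` can now depend on `sc-network-common` instead of the full `sc-network` crate, and test doubles like `TestNetwork` and `NoOpNetwork` implement exactly the capability traits their tests exercise.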
diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index b0e3a8fe42a83..1b10bd248292c 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,7 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +async-trait = "0.1.50" bitflags = "1.3.2" +bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } @@ -29,3 +31,4 @@ sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +thiserror = "1.0" diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 9fbedc542c123..3a30d24900199 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -20,5 +20,7 @@ pub mod config; pub mod message; +pub mod protocol; pub mod request_responses; +pub mod service; pub mod sync; diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs new file mode 100644 index 0000000000000..0fd36cb511112 --- /dev/null +++ b/client/network/common/src/protocol.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod event; diff --git a/client/network/src/protocol/event.rs b/client/network/common/src/protocol/event.rs similarity index 96% rename from client/network/src/protocol/event.rs rename to client/network/common/src/protocol/event.rs index 26c9544960605..c6fb4a2faf9f9 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -67,14 +67,14 @@ pub enum Event { remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. /// This is always equal to the value of - /// [`crate::config::NonDefaultSetConfig::notifications_protocol`] of one of the + /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the /// configured sets. protocol: Cow<'static, str>, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// Always contains a value equal to the value in - /// [`crate::config::NonDefaultSetConfig::fallback_names`]. + /// `sc_network::config::NonDefaultSetConfig::fallback_names`. negotiated_fallback: Option>, /// Role of the remote. 
role: ObservedRole, diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index f409d1ac16d61..a216c9c2d254d 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -19,7 +19,7 @@ //! Collection of generic data structures for request-response protocols. use futures::channel::{mpsc, oneshot}; -use libp2p::PeerId; +use libp2p::{request_response::OutboundFailure, PeerId}; use sc_peerset::ReputationChange; use std::{borrow::Cow, time::Duration}; @@ -115,3 +115,40 @@ pub struct OutgoingResponse { /// > written to the buffer managed by the operating system. pub sent_feedback: Option>, } + +/// When sending a request, what to do on a disconnected recipient. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum IfDisconnected { + /// Try to connect to the peer. + TryConnect, + /// Just fail if the destination is not yet connected. + ImmediateError, +} + +/// Convenience functions for `IfDisconnected`. +impl IfDisconnected { + /// Shall we connect to a disconnected peer? + pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } +} + +/// Error in a request. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RequestFailure { + #[error("We are not currently connected to the requested peer.")] + NotConnected, + #[error("Given protocol hasn't been registered.")] + UnknownProtocol, + #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] + Refused, + #[error("The remote replied, but the local node is no longer interested in the response.")] + Obsolete, + /// Problem on the network. + #[error("Problem on the network: {0}")] + Network(OutboundFailure), +} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs new file mode 100644 index 0000000000000..0fff32b12d66c --- /dev/null +++ b/client/network/common/src/service.rs @@ -0,0 +1,660 @@ +// This file is part of Substrate. +// +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +// +// If you read this, you are very thorough, congratulations. 
+ +use crate::{ + protocol::event::Event, + request_responses::{IfDisconnected, RequestFailure}, + sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, +}; +use futures::{channel::oneshot, Stream}; +pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; +use libp2p::{Multiaddr, PeerId}; +use sc_peerset::ReputationChange; +pub use signature::Signature; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, collections::HashSet, future::Future, pin::Pin, sync::Arc}; + +mod signature; + +/// Signer with network identity +pub trait NetworkSigner { + /// Signs the message with the `KeyPair` that defines the local [`PeerId`]. + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result; +} + +impl NetworkSigner for Arc +where + T: ?Sized, + T: NetworkSigner, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + T::sign_with_local_identity(self, msg) + } +} + +/// Provides access to the networking DHT. +pub trait NetworkDHTProvider { + /// Start getting a value from the DHT. + fn get_value(&self, key: &KademliaKey); + + /// Start putting a value in the DHT. + fn put_value(&self, key: KademliaKey, value: Vec); +} + +impl NetworkDHTProvider for Arc +where + T: ?Sized, + T: NetworkDHTProvider, +{ + fn get_value(&self, key: &KademliaKey) { + T::get_value(self, key) + } + + fn put_value(&self, key: KademliaKey, value: Vec) { + T::put_value(self, key, value) + } +} + +/// Provides an ability to set a fork sync request for a particular block. +pub trait NetworkSyncForkRequest { + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber); +} + +impl NetworkSyncForkRequest for Arc +where + T: ?Sized, + T: NetworkSyncForkRequest, +{ + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber) { + T::set_sync_fork_request(self, peers, hash, number) + } +} + +/// Overview status of the network. +#[derive(Clone)] +pub struct NetworkStatus { + /// Current global sync state. + pub sync_state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, + /// State sync in progress. + pub state_sync: Option, + /// Warp sync in progress. + pub warp_sync: Option>, +} + +/// Provides high-level status information about network. +#[async_trait::async_trait] +pub trait NetworkStatusProvider { + /// High-level network status information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. 
+ async fn status(&self) -> Result, ()>; +} + +// Manual implementation to avoid extra boxing here +impl NetworkStatusProvider for Arc +where + T: ?Sized, + T: NetworkStatusProvider, +{ + fn status<'life0, 'async_trait>( + &'life0 self, + ) -> Pin, ()>> + Send + 'async_trait>> + where + 'life0: 'async_trait, + Self: 'async_trait, + { + T::status(self) + } +} + +/// Provides low-level API for manipulating network peers. +pub trait NetworkPeers { + /// Set authorized peers. + /// + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + fn set_authorized_peers(&self, peers: HashSet); + + /// Set authorized_only flag. + /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. + fn set_authorized_only(&self, reserved_only: bool); + + /// Adds an address known to a node. + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); + + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. + /// + /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. + fn accept_unreserved_peers(&self); + + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. + fn deny_unreserved_peers(&self); + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + /// + /// Returns an `Err` if the given string is not a valid multiaddress + /// or contains an invalid peer ID (which includes the local peer ID). + fn add_reserved_peer(&self, peer: String) -> Result<(), String>; + + /// Removes a `PeerId` from the list of reserved peers. + fn remove_reserved_peer(&self, peer_id: PeerId); + + /// Sets the reserved set of a protocol to the given set of peers. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// The node will start establishing/accepting connections and substreams to/from peers in this + /// set, if it doesn't have any substream open with them yet. + /// + /// Note however, if a call to this function results in less peers on the reserved set, they + /// will not necessarily get disconnected (depending on available free slots in the peer set). + /// If you want to also disconnect those removed peers, you will have to call + /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit + /// this step if the peer set is in reserved only mode. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Add peers to a peer set. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. 
+ /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Returns the number of peers in the sync peer set we're connected to. + fn sync_num_connected(&self) -> usize; +} + +// Manual implementation to avoid extra boxing here +impl NetworkPeers for Arc +where + T: ?Sized, + T: NetworkPeers, +{ + fn set_authorized_peers(&self, peers: HashSet) { + T::set_authorized_peers(self, peers) + } + + fn set_authorized_only(&self, reserved_only: bool) { + T::set_authorized_only(self, reserved_only) + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + T::add_known_address(self, peer_id, addr) + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + T::report_peer(self, who, cost_benefit) + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + T::disconnect_peer(self, who, protocol) + } + + fn accept_unreserved_peers(&self) { + T::accept_unreserved_peers(self) + } + + fn deny_unreserved_peers(&self) { + T::deny_unreserved_peers(self) + } + + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + T::add_reserved_peer(self, peer) + } + + fn remove_reserved_peer(&self, peer_id: PeerId) { + T::remove_reserved_peer(self, peer_id) + } + + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::set_reserved_peers(self, protocol, peers) + } + + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_peers_to_reserved_set(self, protocol, peers) + } + + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_peers_from_reserved_set(self, protocol, peers) + } + + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_to_peers_set(self, protocol, peers) + } + + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_from_peers_set(self, protocol, peers) + } + + fn sync_num_connected(&self) -> usize { + T::sync_num_connected(self) + } +} + +/// Provides access to network-level event stream. +pub trait NetworkEventStream { + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. + /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). + /// + /// The name passed is used to identify the channel in the Prometheus metrics. 
Note that the + /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having + /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory + fn event_stream(&self, name: &'static str) -> Pin + Send>>; +} + +impl NetworkEventStream for Arc +where + T: ?Sized, + T: NetworkEventStream, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + T::event_stream(self, name) + } +} + +/// Trait for providing information about the local network state +pub trait NetworkStateInfo { + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec; + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId; +} + +impl NetworkStateInfo for Arc +where + T: ?Sized, + T: NetworkStateInfo, +{ + fn external_addresses(&self) -> Vec { + T::external_addresses(self) + } + + fn local_peer_id(&self) -> PeerId { + T::local_peer_id(self) + } +} + +/// Reserved slot in the notifications buffer, ready to accept data. +pub trait NotificationSenderReady { + /// Consumes this slots reservation and actually queues the notification. + /// + /// NOTE: Traits can't consume itself, but calling this method second time will return an error. + fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError>; +} + +/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. +#[async_trait::async_trait] +pub trait NotificationSender { + /// Returns a future that resolves when the `NotificationSender` is ready to send a + /// notification. + async fn ready(&self) + -> Result, NotificationSenderError>; +} + +/// Error returned by [`NetworkNotification::notification_sender`]. +#[derive(Debug, thiserror::Error)] +pub enum NotificationSenderError { + /// The notification receiver has been closed, usually because the underlying connection + /// closed. + /// + /// Some of the notifications most recently sent may not have been received. However, + /// the peer may still be connected and a new `NotificationSender` for the same + /// protocol obtained from [`NetworkNotification::notification_sender`]. + #[error("The notification receiver has been closed")] + Closed, + /// Protocol name hasn't been registered. + #[error("Protocol name hasn't been registered")] + BadProtocol, +} + +/// Provides ability to send network notifications. +pub trait NetworkNotification { + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. + /// Has no effect if the notifications channel with this protocol name is not open. + /// + /// If the buffer of pending outgoing notifications with that peer is full, the notification + /// is silently dropped and the connection to the remote will start being shut down. This + /// happens if you call this method at a higher rate than the rate at which the peer processes + /// these notifications, or if the available network bandwidth is too low. + /// + /// For this reason, this method is considered soft-deprecated. You are encouraged to use + /// [`NetworkNotification::notification_sender`] instead. + /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. 
+ /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec); + + /// Obtains a [`NotificationSender`] for a connected peer, if it exists. + /// + /// A `NotificationSender` is scoped to a particular connection to the peer that holds + /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two + /// steps: + /// + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// for another notification, yielding a [`NotificationSenderReady`] token. + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation + /// can only fail if the underlying notification substream or connection has suddenly closed. + /// + /// An error is returned by [`NotificationSenderReady::send`] if there exists no open + /// notifications substream with that combination of peer and protocol, or if the remote + /// has asked to close the notifications substream. If that happens, it is guaranteed that an + /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by + /// [`NetworkEventStream::event_stream`]. + /// + /// If the remote requests to close the notifications substream, all notifications successfully + /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the + /// substream actually gets closed, but attempting to enqueue more notifications will now + /// return an error. It is however possible for the entire connection to be abruptly closed, + /// in which case enqueued notifications will be lost. + /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + /// + /// # Usage + /// + /// This method returns a struct that allows waiting until there is space available in the + /// buffer of messages towards the given peer. If the peer processes notifications at a slower + /// rate than we send them, this buffer will quickly fill up. + /// + /// As such, you should never do something like this: + /// + /// ```ignore + /// // Do NOT do this + /// for peer in peers { + /// if let Ok(n) = network.notification_sender(peer, ...) { + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } + /// } + /// } + /// ``` + /// + /// Doing so would slow down all peers to the rate of the slowest one. A malicious or + /// malfunctioning peer could intentionally process notifications at a very slow rate. + /// + /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one + /// maintained by `sc-network`, and use `notification_sender` to progressively send out + /// elements from your buffer. If this additional buffer is full (which will happen at some + /// point if the peer is too slow to process notifications), appropriate measures can be taken, + /// such as removing non-critical notifications from the buffer or disconnecting the peer + /// using [`NetworkPeers::disconnect_peer`]. + /// + /// + /// Notifications Per-peer buffer + /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet + /// ^ (not covered by + /// | sc-network) + /// + + /// Notifications should be dropped + /// if buffer is full + /// + /// + /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. 
+ fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError>; +} + +impl NetworkNotification for Arc +where + T: ?Sized, + T: NetworkNotification, +{ + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + T::write_notification(self, target, protocol, message) + } + + fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + T::notification_sender(self, target, protocol) + } +} + +/// Provides ability to send network requests. +#[async_trait::async_trait] +pub trait NetworkRequest { + /// Sends a single targeted request to a specific peer. On success, returns the response of + /// the peer. + /// + /// Request-response protocols are a way to complement notifications protocols, but + /// notifications should remain the default ways of communicating information. For example, a + /// peer can announce something through a notification, after which the recipient can obtain + /// more information by performing a request. + /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way + /// you will get an error immediately for disconnected peers, instead of waiting for a + /// potentially very long connection attempt, which would suggest that something is wrong + /// anyway, as you are supposed to be connected because of the notification protocol. + /// + /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. + /// Such restrictions, if desired, need to be enforced at the call site(s). + /// + /// The protocol must have been registered through + /// `NetworkConfiguration::request_response_protocols`. + async fn request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Result, RequestFailure>; + + /// Variation of `request` which starts a request whose response is delivered on a provided + /// channel. + /// + /// Instead of blocking and waiting for a reply, this function returns immediately, sending + /// responses via the passed in sender. This alternative API exists to make it easier to + /// integrate with message passing APIs. + /// + /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a + /// closing connection. This is expected behaviour. With `request` you would get a + /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ); +} + +// Manual implementation to avoid extra boxing here +impl NetworkRequest for Arc +where + T: ?Sized, + T: NetworkRequest, +{ + fn request<'life0, 'async_trait>( + &'life0 self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Pin, RequestFailure>> + Send + 'async_trait>> + where + 'life0: 'async_trait, + Self: 'async_trait, + { + T::request(self, target, protocol, request, connect) + } + + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + T::start_request(self, target, protocol, request, tx, connect) + } +} + +/// Provides ability to propagate transactions over the network. 
+pub trait NetworkTransaction { + /// You may call this when new transactions are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + fn trigger_repropagate(&self); + + /// You must call when new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + fn propagate_transaction(&self, hash: H); +} + +impl NetworkTransaction for Arc +where + T: ?Sized, + T: NetworkTransaction, +{ + fn trigger_repropagate(&self) { + T::trigger_repropagate(self) + } + + fn propagate_transaction(&self, hash: H) { + T::propagate_transaction(self, hash) + } +} + +/// Provides ability to announce blocks to the network. +pub trait NetworkBlock { + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. This function forces such an announcement. + fn announce_block(&self, hash: BlockHash, data: Option>); + + /// Inform the network service about new best imported block. + fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber); +} + +impl NetworkBlock for Arc +where + T: ?Sized, + T: NetworkBlock, +{ + fn announce_block(&self, hash: BlockHash, data: Option>) { + T::announce_block(self, hash, data) + } + + fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber) { + T::new_best_block_imported(self, hash, number) + } +} diff --git a/client/network/src/service/signature.rs b/client/network/common/src/service/signature.rs similarity index 96% rename from client/network/src/service/signature.rs rename to client/network/common/src/service/signature.rs index d21d28a3007b5..602ef3d82979a 100644 --- a/client/network/src/service/signature.rs +++ b/client/network/common/src/service/signature.rs @@ -18,7 +18,10 @@ // // If you read this, you are very thorough, congratulations. -use super::*; +use libp2p::{ + identity::{error::SigningError, Keypair, PublicKey}, + PeerId, +}; /// A result of signing a message with a network identity. 
Since `PeerId` is potentially a hash of a /// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 515608df13d0f..4ff415788f4ea 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -21,7 +21,7 @@ use crate::{ discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, - request_responses, DhtEvent, ObservedRole, + request_responses, }; use bytes::Bytes; @@ -41,7 +41,11 @@ use log::debug; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::import_queue::{IncomingBlock, Origin}; -use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; +use sc_network_common::{ + config::ProtocolId, + protocol::event::{DhtEvent, ObservedRole}, + request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, +}; use sc_peerset::PeersetHandle; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::BlockOrigin; @@ -57,9 +61,7 @@ use std::{ time::Duration, }; -pub use crate::request_responses::{ - IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, -}; +pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 83bc1075b8bad..b1d70c28289bd 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -262,22 +262,26 @@ pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::{ - event::{DhtEvent, Event, ObservedRole}, - PeerInfo, -}; -pub use sc_network_common::sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - StateDownloadProgress, SyncState, +pub use protocol::PeerInfo; +pub use sc_network_common::{ + protocol::event::{DhtEvent, Event, ObservedRole}, + request_responses::{IfDisconnected, RequestFailure}, + service::{ + KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NetworkTransaction, Signature, SigningError, + }, + sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + StateDownloadProgress, SyncState, + }, }; pub use service::{ - DecodingError, IfDisconnected, KademliaKey, Keypair, NetworkService, NetworkWorker, - NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, RequestFailure, - Signature, SigningError, + DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, + NotificationSenderReady, OutboundFailure, PublicKey, }; pub use sc_peerset::ReputationChange; -use sp_runtime::traits::{Block as BlockT, NumberFor}; /// The maximum allowed number of established connections per peer. /// @@ -296,35 +300,3 @@ pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -/// Trait for providing information about the local network state -pub trait NetworkStateInfo { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec; - - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId; -} - -/// Overview status of the network. 
-#[derive(Clone)] -pub struct NetworkStatus { - /// Current global sync state. - pub sync_state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_sync_peers: u32, - /// Total number of connected peers - pub num_connected_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, - /// The total number of bytes received. - pub total_bytes_inbound: u64, - /// The total number of bytes sent. - pub total_bytes_outbound: u64, - /// State sync in progress. - pub state_sync: Option, - /// Warp sync in progress. - pub warp_sync: Option>, -} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c51667017d15c..351e7d207ad1e 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -18,7 +18,6 @@ use crate::{ config, error, - request_responses::RequestFailure, utils::{interval, LruHashSet}, }; @@ -45,6 +44,7 @@ use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, + request_responses::RequestFailure, sync::{ message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, @@ -76,7 +76,6 @@ use std::{ mod notifications; -pub mod event; pub mod message; pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 0d8c6c33b1c95..9eab85a4c1ce1 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -50,7 +50,9 @@ use libp2p::{ NetworkBehaviourAction, PollParameters, }, }; -use sc_network_common::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use sc_network_common::request_responses::{ + IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, +}; use std::{ borrow::Cow, collections::{hash_map::Entry, HashMap}, @@ -118,26 +120,6 @@ impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { } } -/// When sending a request, what to do on a disconnected recipient. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum IfDisconnected { - /// Try to connect to the peer. - TryConnect, - /// Just fail if the destination is not yet connected. - ImmediateError, -} - -/// Convenience functions for `IfDisconnected`. -impl IfDisconnected { - /// Shall we connect to a disconnected peer? - pub fn should_connect(self) -> bool { - match self { - Self::TryConnect => true, - Self::ImmediateError => false, - } - } -} - /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. pub struct RequestResponsesBehaviour { /// The multiple sub-protocols, by name. @@ -787,23 +769,6 @@ pub enum RegisterError { DuplicateProtocol(Cow<'static, str>), } -/// Error in a request. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RequestFailure { - #[error("We are not currently connected to the requested peer.")] - NotConnected, - #[error("Given protocol hasn't been registered.")] - UnknownProtocol, - #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] - Refused, - #[error("The remote replied, but the local node is no longer interested in the response.")] - Obsolete, - /// Problem on the network. 
- #[error("Problem on the network: {0}")] - Network(OutboundFailure), -} - /// Error when processing a request sent by a remote. #[derive(Debug, thiserror::Error)] pub enum ResponseFailure { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1210b0ca64224..bf15a9250d82c 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -37,16 +37,17 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, protocol::{ - self, event::Event, message::generic::Roles, NotificationsSink, NotifsHandlerError, - PeerInfo, Protocol, Ready, + self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, + Ready, }, - transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, + transactions, transport, ExHashT, ReputationChange, }; use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, + kad::record::Key as KademliaKey, multiaddr, ping::Failure as PingFailure, swarm::{ @@ -60,7 +61,17 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_common::sync::{SyncState, SyncStatus}; +use sc_network_common::{ + protocol::event::{DhtEvent, Event}, + request_responses::{IfDisconnected, RequestFailure}, + service::{ + NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NotificationSender as NotificationSenderT, NotificationSenderError, + NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, + }, + sync::{SyncState, SyncStatus}, +}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -81,24 +92,15 @@ use std::{ task::Poll, }; -pub use behaviour::{ - IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, ResponseFailure, -}; +pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; mod metrics; mod out_events; -mod signature; #[cfg(test)] mod tests; -pub use libp2p::{ - identity::{ - error::{DecodingError, SigningError}, - Keypair, PublicKey, - }, - kad::record::Key as KademliaKey, -}; -pub use signature::Signature; +pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; +use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { @@ -723,289 +725,6 @@ where } impl NetworkService { - /// Returns the local `PeerId`. - pub fn local_peer_id(&self) -> &PeerId { - &self.local_peer_id - } - - /// Signs the message with the `KeyPair` that defined the local `PeerId`. - pub fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> Result { - Signature::sign_message(msg.as_ref(), &self.local_identity) - } - - /// Set authorized peers. - /// - /// Need a better solution to manage authorized peers, but now just use reserved peers for - /// prototyping. - pub fn set_authorized_peers(&self, peers: HashSet) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); - } - - /// Set authorized_only flag. 
- /// - /// Need a better solution to decide authorized_only, but now just use reserved_only flag for - /// prototyping. - pub fn set_authorized_only(&self, reserved_only: bool) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); - } - - /// Adds an address known to a node. - pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - } - - /// Appends a notification to the buffer of pending outgoing notifications with the given peer. - /// Has no effect if the notifications channel with this protocol name is not open. - /// - /// If the buffer of pending outgoing notifications with that peer is full, the notification - /// is silently dropped and the connection to the remote will start being shut down. This - /// happens if you call this method at a higher rate than the rate at which the peer processes - /// these notifications, or if the available network bandwidth is too low. - /// - /// For this reason, this method is considered soft-deprecated. You are encouraged to use - /// [`NetworkService::notification_sender`] instead. - /// - /// > **Note**: The reason why this is a no-op in the situation where we have no channel is - /// > that we don't guarantee message delivery anyway. Networking issues can cause - /// > connections to drop at any time, and higher-level logic shouldn't differentiate - /// > between the remote voluntarily closing a substream or a network error - /// > preventing the message from being delivered. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - pub fn write_notification( - &self, - target: PeerId, - protocol: Cow<'static, str>, - message: Vec, - ) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } - - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); - } - - /// Obtains a [`NotificationSender`] for a connected peer, if it exists. - /// - /// A `NotificationSender` is scoped to a particular connection to the peer that holds - /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two - /// steps: - /// - /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready - /// for another notification, yielding a [`NotificationSenderReady`] token. - /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation - /// can only fail if the underlying notification substream or connection has suddenly closed. 
- /// - /// An error is returned by [`NotificationSenderReady::send`] if there exists no open - /// notifications substream with that combination of peer and protocol, or if the remote - /// has asked to close the notifications substream. If that happens, it is guaranteed that an - /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by - /// [`NetworkService::event_stream`]. - /// - /// If the remote requests to close the notifications substream, all notifications successfully - /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the - /// substream actually gets closed, but attempting to enqueue more notifications will now - /// return an error. It is however possible for the entire connection to be abruptly closed, - /// in which case enqueued notifications will be lost. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - /// - /// # Usage - /// - /// This method returns a struct that allows waiting until there is space available in the - /// buffer of messages towards the given peer. If the peer processes notifications at a slower - /// rate than we send them, this buffer will quickly fill up. - /// - /// As such, you should never do something like this: - /// - /// ```ignore - /// // Do NOT do this - /// for peer in peers { - /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } - /// } - /// } - /// ``` - /// - /// Doing so would slow down all peers to the rate of the slowest one. A malicious or - /// malfunctioning peer could intentionally process notifications at a very slow rate. - /// - /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one - /// maintained by `sc-network`, and use `notification_sender` to progressively send out - /// elements from your buffer. If this additional buffer is full (which will happen at some - /// point if the peer is too slow to process notifications), appropriate measures can be taken, - /// such as removing non-critical notifications from the buffer or disconnecting the peer - /// using [`NetworkService::disconnect_peer`]. - /// - /// - /// Notifications Per-peer buffer - /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet - /// ^ (not covered by - /// | sc-network) - /// + - /// Notifications should be dropped - /// if buffer is full - /// - /// - /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. - pub fn notification_sender( - &self, - target: PeerId, - protocol: Cow<'static, str>, - ) -> Result { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); - - Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) - } - - /// Returns a stream containing the events that happen on the network. - /// - /// If this method is called multiple times, the events are duplicated. 
- /// - /// The stream never ends (unless the `NetworkWorker` gets shut down). - /// - /// The name passed is used to identify the channel in the Prometheus metrics. Note that the - /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having - /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory - pub fn event_stream(&self, name: &'static str) -> impl Stream { - let (tx, rx) = out_events::channel(name); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); - rx - } - - /// Sends a single targeted request to a specific peer. On success, returns the response of - /// the peer. - /// - /// Request-response protocols are a way to complement notifications protocols, but - /// notifications should remain the default ways of communicating information. For example, a - /// peer can announce something through a notification, after which the recipient can obtain - /// more information by performing a request. - /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way - /// you will get an error immediately for disconnected peers, instead of waiting for a - /// potentially very long connection attempt, which would suggest that something is wrong - /// anyway, as you are supposed to be connected because of the notification protocol. - /// - /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. - /// Such restrictions, if desired, need to be enforced at the call site(s). - /// - /// The protocol must have been registered through - /// [`NetworkConfiguration::request_response_protocols`]( - /// crate::config::NetworkConfiguration::request_response_protocols). - pub async fn request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - connect: IfDisconnected, - ) -> Result, RequestFailure> { - let (tx, rx) = oneshot::channel(); - - self.start_request(target, protocol, request, tx, connect); - - match rx.await { - Ok(v) => v, - // The channel can only be closed if the network worker no longer exists. If the - // network worker no longer exists, then all connections to `target` are necessarily - // closed, and we legitimately report this situation as a "ConnectionClosed". - Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), - } - } - - /// Variation of `request` which starts a request whose response is delivered on a provided - /// channel. - /// - /// Instead of blocking and waiting for a reply, this function returns immediately, sending - /// responses via the passed in sender. This alternative API exists to make it easier to - /// integrate with message passing APIs. - /// - /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a - /// closing connection. This is expected behaviour. With `request` you would get a - /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. - pub fn start_request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx, - connect, - }); - } - - /// High-level network status information. - /// - /// Returns an error if the `NetworkWorker` is no longer running. 
- pub async fn status(&self) -> Result, ()> { - let (tx, rx) = oneshot::channel(); - - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); - - match rx.await { - Ok(v) => v.map_err(|_| ()), - // The channel can only be closed if the network worker no longer exists. - Err(_) => Err(()), - } - } - /// Get network state. /// /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally @@ -1026,74 +745,97 @@ impl NetworkService { } } - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - /// You must call when new transaction is imported by the transaction pool. + /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. + if peer == self.local_peer_id { + Err("Local peer ID in peer set.".to_string()) + } else { + Ok((peer, addr)) + } + }) + .collect::, String>>() } +} - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); +impl sp_consensus::SyncOracle for NetworkService { + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) } - /// Disconnect from a node as soon as possible. - /// - /// This triggers the same effects as if the connection had closed itself spontaneously. - /// - /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also - /// prevents the local node from re-establishing an outgoing substream to this peer until it - /// is added again. 
- pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); + fn is_offline(&self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 } +} +impl sc_consensus::JustificationSyncLink for NetworkService { /// Request a justification for the given block from the network. /// /// On success, the justification will be passed to the import queue that was part at /// initialization as part of the configuration. - pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } - /// Clear all pending justification requests. - pub fn clear_justification_requests(&self) { + fn clear_justification_requests(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); } +} - /// Are we in the process of downloading the chain? - pub fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) +impl NetworkStateInfo for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() + } + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId { + self.local_peer_id } +} +impl NetworkSigner for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + Signature::sign_message(msg.as_ref(), &self.local_identity) + } +} + +impl NetworkDHTProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ /// Start getting a value from the DHT. /// /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn get_value(&self, key: &KademliaKey) { + fn get_value(&self, key: &KademliaKey) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } @@ -1101,27 +843,86 @@ impl NetworkService { /// /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn put_value(&self, key: KademliaKey, value: Vec) { + fn put_value(&self, key: KademliaKey, value: Vec) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } +} + +impl NetworkSyncForkRequest> for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } +} - /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. 
- pub fn accept_unreserved_peers(&self) { +#[async_trait::async_trait] +impl NetworkStatusProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + async fn status(&self) -> Result, ()> { + let (tx, rx) = oneshot::channel(); + + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } + } +} + +impl NetworkPeers for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn set_authorized_peers(&self, peers: HashSet) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + } + + fn set_authorized_only(&self, reserved_only: bool) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); + } + + fn accept_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing - /// purposes. - pub fn deny_unreserved_peers(&self) { + fn deny_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. - /// - /// Returns an `Err` if the given string is not a valid multiaddress - /// or contains an invalid peer ID (which includes the local peer ID). - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { @@ -1135,28 +936,11 @@ impl NetworkService { Ok(()) } - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { + fn remove_reserved_peer(&self, peer_id: PeerId) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Sets the reserved set of a protocol to the given set of peers. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// The node will start establishing/accepting connections and substreams to/from peers in this - /// set, if it doesn't have any substream open with them yet. - /// - /// Note however, if a call to this function results in less peers on the reserved set, they - /// will not necessarily get disconnected (depending on available free slots in the peer set). - /// If you want to also disconnect those removed peers, you will have to call - /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit - /// this step if the peer set is in reserved only mode. 
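// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] Consuming the `NetworkStatusProvider`
// impl introduced above. This assumes the trait is parameterised by the block
// type, matching the `NetworkStatus<B>` it returns, and that the struct kept
// the `num_connected_peers` field it had before the move.
use sc_network_common::service::NetworkStatusProvider;
use sp_runtime::traits::Block as BlockT;

async fn log_connected_peers<B, N>(network: &N)
where
    B: BlockT,
    N: NetworkStatusProvider<B>,
{
    // `status` errors only if the `NetworkWorker` has already shut down.
    if let Ok(status) = network.status().await {
        log::info!("connected peers: {}", status.num_connected_peers);
    }
}
// ---------------------------------------------------------------------------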
- /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn set_reserved_peers( + fn set_reserved_peers( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1187,14 +971,7 @@ impl NetworkService { Ok(()) } - /// Add peers to a peer set. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_peers_to_reserved_set( + fn add_peers_to_reserved_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1220,8 +997,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - pub fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1229,26 +1005,7 @@ impl NetworkService { } } - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); - } - - /// Add a peer to a set of peers. - /// - /// If the set has slots available, it will try to open a substream with this peer. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_to_peers_set( + fn add_to_peers_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1274,10 +1031,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - /// - /// If we currently have an open substream with this peer, it will soon be closed. - pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1285,90 +1039,158 @@ impl NetworkService { } } - /// Returns the number of peers we're connected to. - pub fn num_connected(&self) -> usize { + fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } +} - /// Inform the network service about new best imported block. - pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); +impl NetworkEventStream for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + let (tx, rx) = out_events::channel(name); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); + Box::pin(rx) } +} - /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. 
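// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The reserved-set management moved
// above is now reachable through the `NetworkPeers` bound alone. The protocol
// and helper names here are illustrative.
use sc_network::Multiaddr;
use sc_network_common::service::NetworkPeers;
use std::{borrow::Cow, collections::HashSet};

fn pin_reserved_peers<N: NetworkPeers>(
    network: &N,
    peers: HashSet<Multiaddr>,
) -> Result<(), String> {
    // Every `Multiaddr` must end in a `/p2p/<peer id>` component, as the
    // trait documentation requires.
    network.set_reserved_peers(Cow::Borrowed("/example/proto/1"), peers)?;
    // Only reserved peers from here on.
    network.deny_unreserved_peers();
    Ok(())
}
// ---------------------------------------------------------------------------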
- /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn split_multiaddr_and_peer_id( - &self, - peers: HashSet, - ) -> Result, String> { - peers - .into_iter() - .map(|mut addr| { - let peer = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| "Invalid PeerId format".to_string())?, - _ => return Err("Missing PeerId from address".to_string()), - }; +impl NetworkNotification for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + // Notification silently discarded, as documented. + debug!( + target: "sub-libp2p", + "Attempted to send notification on missing or closed substream: {}, {:?}", + target, protocol, + ); + return + } + }; - // Make sure the local peer ID is never added to the PSM - // or added as a "known address", even if given. - if peer == self.local_peer_id { - Err("Local peer ID in peer set.".to_string()) - } else { - Ok((peer, addr)) - } - }) - .collect::, String>>() - } -} + if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { + notifications_sizes_metric + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); + } -impl sp_consensus::SyncOracle for NetworkService { - fn is_major_syncing(&mut self) -> bool { - Self::is_major_syncing(self) + // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, protocol, message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + sink.send_sync_notification(message); } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. 
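// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The boxed stream returned by the
// `NetworkEventStream` impl above works with ordinary `StreamExt` combinators;
// the variant matched here is the `Event::NotificationStreamOpened` shape used
// by the tests later in this patch.
use futures::StreamExt;
use sc_network_common::{protocol::event::Event, service::NetworkEventStream};

async fn watch_substreams<N: NetworkEventStream>(network: &N) {
    let mut events = network.event_stream("editor-example");
    while let Some(event) = events.next().await {
        if let Event::NotificationStreamOpened { remote, .. } = event {
            log::debug!("notification substream opened with {:?}", remote);
        }
    }
}
// ---------------------------------------------------------------------------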
+ let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + return Err(NotificationSenderError::Closed) + } + }; + + let notification_size_metric = self + .notifications_sizes_metric + .as_ref() + .map(|histogram| histogram.with_label_values(&["out", &protocol])); + + Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) } } -impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { - fn is_major_syncing(&mut self) -> bool { - NetworkService::is_major_syncing(self) +#[async_trait::async_trait] +impl NetworkRequest for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + async fn request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Result, RequestFailure> { + let (tx, rx) = oneshot::channel(); + + self.start_request(target, protocol, request, tx, connect); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx, + connect, + }); } } -impl sc_consensus::JustificationSyncLink for NetworkService { - fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - Self::request_justification(self, hash, number); +impl NetworkTransaction for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn trigger_repropagate(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); } - fn clear_justification_requests(&self) { - Self::clear_justification_requests(self); + fn propagate_transaction(&self, hash: H) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); } } -impl NetworkStateInfo for NetworkService +impl NetworkBlock> for NetworkService where - B: sp_runtime::traits::Block, + B: BlockT + 'static, H: ExHashT, { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec { - self.external_addresses.lock().clone() + fn announce_block(&self, hash: B::Hash, data: Option>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); } - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId { - self.local_peer_id + fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } } @@ -1385,26 +1207,27 @@ pub struct NotificationSender { notification_size_metric: Option, } -impl NotificationSender { - /// Returns a future that resolves when the `NotificationSender` is ready to send a - /// notification. 
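// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] A one-shot call through the
// `NetworkRequest` impl above, assuming the trait mirrors the inherent
// method's signature, and using `IfDisconnected::ImmediateError` as the
// relocated documentation advises for request-response protocols. The
// protocol name and helper are illustrative.
use sc_network::PeerId;
use sc_network_common::{request_responses::IfDisconnected, service::NetworkRequest};

async fn fetch_from_peer<N: NetworkRequest>(
    network: &N,
    target: PeerId,
    payload: Vec<u8>,
) -> Option<Vec<u8>> {
    network
        .request(target, "/example/req/1".into(), payload, IfDisconnected::ImmediateError)
        .await
        .ok()
}
// ---------------------------------------------------------------------------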
- pub async fn ready(&self) -> Result, NotificationSenderError> { - Ok(NotificationSenderReady { +#[async_trait::async_trait] +impl NotificationSenderT for NotificationSender { + async fn ready( + &self, + ) -> Result, NotificationSenderError> { + Ok(Box::new(NotificationSenderReady { ready: match self.sink.reserve_notification().await { - Ok(r) => r, + Ok(r) => Some(r), Err(()) => return Err(NotificationSenderError::Closed), }, peer_id: self.sink.peer_id(), protocol_name: &self.protocol_name, notification_size_metric: self.notification_size_metric.clone(), - }) + })) } } /// Reserved slot in the notifications buffer, ready to accept data. #[must_use] pub struct NotificationSenderReady<'a> { - ready: Ready<'a>, + ready: Option>, /// Target of the notification. peer_id: &'a PeerId, @@ -1417,11 +1240,8 @@ pub struct NotificationSenderReady<'a> { notification_size_metric: Option, } -impl<'a> NotificationSenderReady<'a> { - /// Consumes this slots reservation and actually queues the notification. - pub fn send(self, notification: impl Into>) -> Result<(), NotificationSenderError> { - let notification = notification.into(); - +impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { + fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError> { if let Some(notification_size_metric) = &self.notification_size_metric { notification_size_metric.observe(notification.len() as f64); } @@ -1433,26 +1253,14 @@ impl<'a> NotificationSenderReady<'a> { ); trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); - self.ready.send(notification).map_err(|()| NotificationSenderError::Closed) + self.ready + .take() + .ok_or(NotificationSenderError::Closed)? + .send(notification) + .map_err(|()| NotificationSenderError::Closed) } } -/// Error returned by [`NetworkService::send_notification`]. -#[derive(Debug, thiserror::Error)] -pub enum NotificationSenderError { - /// The notification receiver has been closed, usually because the underlying connection - /// closed. - /// - /// Some of the notifications most recently sent may not have been received. However, - /// the peer may still be connected and a new `NotificationSender` for the same - /// protocol obtained from [`NetworkService::notification_sender`]. - #[error("The notification receiver has been closed")] - Closed, - /// Protocol name hasn't been registered. - #[error("Protocol name hasn't been registered")] - BadProtocol, -} - /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index c95b46af4cefa..4144d7f19551e 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -31,11 +31,10 @@ //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. 
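// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The two-step send path after the
// boxing above: `notification_sender` yields a boxed sender, `ready` waits
// for buffer space, and `send` now takes `&mut self` plus a `Vec<u8>`. This
// mirrors the updated `notifications_back_pressure` test further down.
use sc_network::PeerId;
use sc_network_common::service::{NetworkNotification, NotificationSenderError};

async fn send_one<N: NetworkNotification>(
    network: &N,
    target: PeerId,
) -> Result<(), NotificationSenderError> {
    let sender = network.notification_sender(target, "/example/notif/1".into())?;
    sender.ready().await?.send(b"hello".to_vec())
}
// ---------------------------------------------------------------------------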
-use crate::Event; - use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; +use sc_network_common::protocol::event::Event; use std::{ cell::RefCell, fmt, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index f757bf4891fbc..6ccca17650b67 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -16,11 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, Event, NetworkService, NetworkWorker}; +use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; -use sc_network_common::config::ProtocolId; +use sc_network_common::{ + config::ProtocolId, + protocol::event::Event, + service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, +}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, @@ -192,7 +196,7 @@ fn build_nodes_one_proto() -> ( set_config: config::SetConfig { reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + peer_id: node1.local_peer_id(), }], ..Default::default() }, @@ -214,18 +218,10 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification( - node2.local_peer_id().clone(), - PROTOCOL_NAME, - b"hello world".to_vec(), - ); + node1.write_notification(node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification( - node1.local_peer_id().clone(), - PROTOCOL_NAME, - b"hello world".to_vec(), - ); + node2.write_notification(node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); } async_std::task::block_on(async move { @@ -249,14 +245,14 @@ fn notifications_state_consistent() { // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { node1.write_notification( - node2.local_peer_id().clone(), + node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); } if rand::random::() % 5 >= 3 { node2.write_notification( - node1.local_peer_id().clone(), + node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -264,10 +260,10 @@ fn notifications_state_consistent() { // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME); + node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME); + node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -295,7 +291,7 @@ fn notifications_state_consistent() { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. 
@@ -304,7 +300,7 @@ fn notifications_state_consistent() { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); }, future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. @@ -312,7 +308,7 @@ fn notifications_state_consistent() { if protocol == PROTOCOL_NAME { assert!(node1_to_node2_open); node1_to_node2_open = false; - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. @@ -320,14 +316,14 @@ fn notifications_state_consistent() { if protocol == PROTOCOL_NAME { assert!(node2_to_node1_open); node2_to_node1_open = false; - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); }, future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); if rand::random::() % 5 >= 4 { node1.write_notification( - node2.local_peer_id().clone(), + node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -335,10 +331,10 @@ fn notifications_state_consistent() { }, future::Either::Right(Event::NotificationsReceived { remote, .. }) => { assert!(node2_to_node1_open); - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); if rand::random::() % 5 >= 4 { node2.write_notification( - node1.local_peer_id().clone(), + node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -373,7 +369,7 @@ fn lots_of_incoming_peers_works() { ..config::NetworkConfiguration::new_local() }); - let main_node_peer_id = *main_node.local_peer_id(); + let main_node_peer_id = main_node.local_peer_id(); // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. @@ -476,8 +472,13 @@ fn notifications_back_pressure() { // Sending! 
for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); - notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); + let notif = node1.notification_sender(node2_id, PROTOCOL_NAME).unwrap(); + notif + .ready() + .await + .unwrap() + .send(format!("hello #{}", num).into_bytes()) + .unwrap(); } receiver.await; @@ -514,7 +515,7 @@ fn fallback_name_working() { set_config: config::SetConfig { reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + peer_id: node1.local_peer_id(), }], ..Default::default() }, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 342b6a0430272..f7e4d774ca812 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -32,7 +32,7 @@ use crate::{ protocol::message, service::NetworkService, utils::{interval, LruHashSet}, - Event, ExHashT, ObservedRole, + ExHashT, }; use codec::{Decode, Encode}; @@ -40,7 +40,11 @@ use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{ + config::ProtocolId, + protocol::event::{Event, ObservedRole}, + service::{NetworkEventStream, NetworkNotification, NetworkPeers}, +}; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, @@ -176,7 +180,7 @@ impl TransactionsHandlerPrototype { transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let event_stream = service.event_stream("transactions-handler").boxed(); + let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); let gossip_enabled = Arc::new(AtomicBool::new(false)); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 09b4139c213a6..3ba663aba2267 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -45,7 +45,8 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, + ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, + Verifier, }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ @@ -56,8 +57,9 @@ use sc_network::{ Multiaddr, NetworkService, NetworkWorker, }; pub use sc_network_common::config::ProtocolId; -use sc_network_common::sync::warp::{ - AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider, +use sc_network_common::{ + service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, + sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -71,7 +73,7 @@ use sp_blockchain::{ }; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, - BlockOrigin, Error as ConsensusError, + BlockOrigin, Error as ConsensusError, SyncOracle, }; use sp_core::H256; use sp_runtime::{ @@ -243,7 +245,7 @@ where { /// Get this peer ID. 
pub fn id(&self) -> PeerId { - *self.network.service().local_peer_id() + self.network.service().local_peer_id() } /// Returns true if we're major syncing. @@ -797,7 +799,7 @@ where let addrs = connect_to .iter() .map(|v| { - let peer_id = *self.peer(*v).network_service().local_peer_id(); + let peer_id = self.peer(*v).network_service().local_peer_id(); let multiaddr = self.peer(*v).listen_addr.clone(); MultiaddrWithPeerId { peer_id, multiaddr } }) @@ -893,7 +895,7 @@ where self.mut_peers(move |peers| { for peer in peers.iter_mut() { peer.network - .add_known_address(*network.service().local_peer_id(), listen_addr.clone()); + .add_known_address(network.service().local_peer_id(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 8da2d4be3adde..ab26c0c38596c 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -29,6 +29,7 @@ threadpool = "1.7" tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index c80b511c84d17..f379ebad17bbf 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -325,19 +325,88 @@ impl AsyncApi { mod tests { use super::*; use sc_client_db::offchain::LocalStorage; - use sc_network::{NetworkStateInfo, PeerId}; + use sc_network::{PeerId, ReputationChange}; + use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; use sp_core::offchain::{DbExternalities, Externalities}; - use std::time::SystemTime; + use std::{borrow::Cow, time::SystemTime}; pub(super) struct TestNetwork(); - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn 
sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index d54d491b04c43..14a2408e61a70 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -35,14 +35,14 @@ #![warn(missing_docs)] -use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, marker::PhantomData, sync::Arc}; use futures::{ future::{ready, Future}, prelude::*, }; use parking_lot::Mutex; -use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; +use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; use sp_runtime::{ @@ -60,27 +60,9 @@ const LOG_TARGET: &str = "offchain-worker"; /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the /// underlying Substrate networking. -pub trait NetworkProvider: NetworkStateInfo { - /// Set the authorized peers. - fn set_authorized_peers(&self, peers: HashSet); +pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {} - /// Set the authorized only flag. - fn set_authorized_only(&self, reserved_only: bool); -} - -impl NetworkProvider for NetworkService -where - B: traits::Block + 'static, - H: ExHashT, -{ - fn set_authorized_peers(&self, peers: HashSet) { - NetworkService::set_authorized_peers(self, peers) - } - - fn set_authorized_only(&self, reserved_only: bool) { - NetworkService::set_authorized_only(self, reserved_only) - } -} +impl NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {} /// Options for [`OffchainWorkers`] pub struct OffchainWorkerOptions { @@ -266,11 +248,11 @@ mod tests { use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network::{Multiaddr, PeerId}; + use sc_network::{Multiaddr, PeerId, ReputationChange}; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use std::sync::Arc; + use std::{borrow::Cow, collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, @@ -288,13 +270,81 @@ mod tests { } } - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, 
+ _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 55c0006de33fb..f81143583eacf 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -38,7 +38,10 @@ use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; -use sc_network_common::sync::warp::WarpSyncProvider; +use sc_network_common::{ + service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, + sync::warp::WarpSyncProvider, +}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, @@ -319,6 +322,31 @@ where ) } +/// Shared network instance implementing a set of mandatory traits. +pub trait SpawnTaskNetwork: + sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static +{ +} + +impl SpawnTaskNetwork for T +where + Block: BlockT, + T: sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static, +{ +} + /// Parameters to pass into `build`. pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. @@ -337,7 +365,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub rpc_builder: Box Result, Error>>, /// A shared network instance. - pub network: Arc::Hash>>, + pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, /// Telemetry instance for this node. 
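// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The builder change above follows
// the same pattern as the offchain `NetworkProvider` blanket impl: helpers
// name only the narrow traits they need, and any `Arc<NetworkService<..>>`
// still qualifies. `announce_pool_import` is a hypothetical helper.
use sc_network_common::service::{NetworkStateInfo, NetworkTransaction};

fn announce_pool_import<H, Network>(network: &Network, hash: H)
where
    Network: NetworkStateInfo + NetworkTransaction<H> + Send + Sync,
{
    log::debug!("{} propagating one transaction", network.local_peer_id());
    network.propagate_transaction(hash);
}
// ---------------------------------------------------------------------------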
@@ -349,7 +377,7 @@ pub fn build_offchain_workers( config: &Configuration, spawn_handle: SpawnTaskHandle, client: Arc, - network: Arc::Hash>>, + network: Arc, ) -> Option>> where TBl: BlockT, @@ -516,13 +544,14 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( - transaction_pool: Arc, - network: Arc::Hash>>, +async fn transaction_notifications( + transaction_pool: Arc, + network: Network, telemetry: Option, ) where - TBl: BlockT, - TExPool: MaintainedTransactionPool::Hash>, + Block: BlockT, + ExPool: MaintainedTransactionPool::Hash>, + Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool @@ -542,13 +571,18 @@ async fn transaction_notifications( .await; } -fn init_telemetry>( +fn init_telemetry( config: &mut Configuration, - network: Arc::Hash>>, - client: Arc, + network: Network, + client: Arc, telemetry: &mut Telemetry, sysinfo: Option, -) -> sc_telemetry::Result { +) -> sc_telemetry::Result +where + Block: BlockT, + Client: BlockBackend, + Network: NetworkStateInfo, +{ let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { name: config.network.node_name.to_owned(), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 5291d219e8102..2f35a04451e79 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,9 +42,11 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; +use sc_network_common::service::NetworkBlock; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; +use sp_consensus::SyncOracle; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index ef3132f61ab99..13b249a7b9563 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -22,7 +22,8 @@ use crate::config::Configuration; use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::{config::Role, NetworkService, NetworkStatus}; +use sc_network::config::Role; +use sc_network_common::service::{NetworkStatus, NetworkStatusProvider}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; @@ -182,15 +183,16 @@ impl MetricsService { /// Returns a never-ending `Future` that performs the /// metric and telemetry updates with information from /// the given sources. - pub async fn run( + pub async fn run( mut self, client: Arc, transactions: Arc, - network: Arc::Hash>>, + network: TNet, ) where TBl: Block, TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, + TNet: NetworkStatusProvider, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index d003db57eb7ac..01c3ee2348ef5 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -27,6 +27,7 @@ sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../.. 
sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } +sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 2d63362daffba..9c720c6fedea0 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -26,6 +26,7 @@ use sc_network::{ config::{NetworkConfiguration, TransportConfig}, multiaddr, Multiaddr, }; +use sc_network_common::service::{NetworkBlock, NetworkPeers, NetworkStateInfo}; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, @@ -320,7 +321,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -340,7 +341,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -387,7 +388,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -422,7 +423,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 4539cec2c8e0a..043533cbf2258 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -236,10 +236,10 @@ pub trait Proposer { pub trait SyncOracle { /// Whether the synchronization service is undergoing major sync. /// Returns true if so. - fn is_major_syncing(&mut self) -> bool; + fn is_major_syncing(&self) -> bool; /// Whether the synchronization service is offline. /// Returns true if so. - fn is_offline(&mut self) -> bool; + fn is_offline(&self) -> bool; } /// A synchronization oracle for when there is no network. 
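
The switch of `SyncOracle` from `&mut self` to `&self` receivers is what unlocks the simplified `Arc` impl in the hunks that follow: an `Arc` only hands out shared references, so a `&mut self` method cannot be forwarded through it without the old `for<'r> &'r T` workaround. A reduced sketch of the pattern, with illustrative names:

    use std::sync::Arc;

    trait Oracle {
        // A shared receiver; with `&mut self` the Arc impl below would not compile.
        fn is_syncing(&self) -> bool;
    }

    impl<T: Oracle + ?Sized> Oracle for Arc<T> {
        fn is_syncing(&self) -> bool {
            // `&Arc<T>` auto-derefs to `&T`, just like `T::is_major_syncing(self)`.
            T::is_syncing(self)
        }
    }

    struct NoNet;
    impl Oracle for NoNet {
        fn is_syncing(&self) -> bool { false }
    }

    fn main() {
        let shared: Arc<dyn Oracle> = Arc::new(NoNet);
        assert!(!shared.is_syncing());
    }
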
@@ -247,10 +247,10 @@ pub trait SyncOracle {
pub struct NoNetwork;

impl SyncOracle for NoNetwork {
-	fn is_major_syncing(&mut self) -> bool {
+	fn is_major_syncing(&self) -> bool {
		false
	}
-	fn is_offline(&mut self) -> bool {
+	fn is_offline(&self) -> bool {
		false
	}
}
@@ -258,14 +258,14 @@ impl SyncOracle for NoNetwork {
impl<T> SyncOracle for Arc<T>
where
	T: ?Sized,
-	for<'r> &'r T: SyncOracle,
+	T: SyncOracle,
{
-	fn is_major_syncing(&mut self) -> bool {
-		<&T>::is_major_syncing(&mut &**self)
+	fn is_major_syncing(&self) -> bool {
+		T::is_major_syncing(self)
	}

-	fn is_offline(&mut self) -> bool {
-		<&T>::is_offline(&mut &**self)
+	fn is_offline(&self) -> bool {
+		T::is_offline(self)
	}
}

From ef433a8c31947c73e9bad212a9672d1f841a9c0d Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Wed, 10 Aug 2022 10:39:24 +0200
Subject: [PATCH 115/135] Add BoundedVec::sort_by_key (#11998)

Signed-off-by: Oliver Tale-Yazdi
---
 primitives/runtime/src/bounded/bounded_vec.rs | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs
index d5f3f2da0d615..aed1a156ad699 100644
--- a/primitives/runtime/src/bounded/bounded_vec.rs
+++ b/primitives/runtime/src/bounded/bounded_vec.rs
@@ -319,6 +319,17 @@ impl<T, S: Get<u32>> BoundedVec<T, S> {
		self.0.sort_by(compare)
	}

+	/// Exactly the same semantics as [`slice::sort_by_key`].
+	///
+	/// This is safe since sorting cannot change the number of elements in the vector.
+	pub fn sort_by_key<K, F>(&mut self, f: F)
+	where
+		F: FnMut(&T) -> K,
+		K: sp_std::cmp::Ord,
+	{
+		self.0.sort_by_key(f)
+	}
+
	/// Exactly the same semantics as [`slice::sort`].
	///
	/// This is safe since sorting cannot change the number of elements in the vector.
@@ -1189,4 +1200,12 @@ pub mod test {
		b1.iter().map(|x| x + 1).rev().take(2).try_collect();
		assert!(b2.is_err());
	}
+
+	#[test]
+	fn bounded_vec_sort_by_key_works() {
+		let mut v: BoundedVec<i32, ConstU32<5>> = bounded_vec![-5, 4, 1, -3, 2];
+		// Sort by absolute value.
+		v.sort_by_key(|k| k.abs());
+		assert_eq!(v, vec![1, 2, -3, 4, -5]);
+	}
 }

From edeba3f760fe54600ea38c8765a0d099e58d4799 Mon Sep 17 00:00:00 2001
From: Muharem Ismailov
Date: Wed, 10 Aug 2022 12:36:16 +0200
Subject: [PATCH 116/135] Transaction payment runtime api: query call info and
 fee details (#11819)

* Transaction payment RPC calls: query call info

* transaction payment pallet - runtime api - add query_call_info and fee_details

* remove unused deps

* separate call runtime api

* undo fmt for unchanged code

* system config call bounded to GetDispatchInfo, drop Call generic for query call info/fee

* impl GetDispatchInfo for Extrinsics within runtime test-utils

* introduced runtime api methods that accept an encoded Call instead of the Call type

* replace Bytes by Vec, docs for the new api, drop len argument, drop GetDispatchInfo bound from system_Config::Call

* clean up toml and extra impl for dropped bound

* panic if Call can not be decoded

* revert to 6d0ca79

* fmt and docs

* rustfmt
---
 bin/node-template/runtime/src/lib.rs | 17 +++++
 bin/node/runtime/src/lib.rs | 11 ++++
 .../rpc/runtime-api/src/lib.rs | 12 ++++
 frame/transaction-payment/src/lib.rs | 63 +++++++++++++++++++
 4 files changed, 103 insertions(+)

diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs
index 88fc86db02ef9..b43fbde52dcdc 100644
--- a/bin/node-template/runtime/src/lib.rs
+++ b/bin/node-template/runtime/src/lib.rs
@@ -467,6 +467,23 @@ impl_runtime_apis!
{ } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: Call, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: Call, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2ac1e6444f119..6bfbe3413058a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1965,6 +1965,17 @@ impl_runtime_apis! { } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + impl pallet_mmr::primitives::MmrApi< Block, mmr::Hash, diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 5a0c70138db24..6944593daa57a 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -31,4 +31,16 @@ sp_api::decl_runtime_apis! { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } + + pub trait TransactionPaymentCallApi + where + Balance: Codec + MaybeDisplay, + Call: Codec, + { + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo; + + /// Query fee details of a given encoded `Call`. + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails; + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index fe37acb214452..ff65c0d2735fd 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -445,6 +445,32 @@ where } } + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. + pub fn query_call_info(call: T::Call, len: u32) -> RuntimeDispatchInfo> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let DispatchInfo { weight, class, .. } = dispatch_info; + + RuntimeDispatchInfo { + weight, + class, + partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), + } + } + + /// Query fee details of a given encoded `Call`. + pub fn query_call_fee_details(call: T::Call, len: u32) -> FeeDetails> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let tip = 0u32.into(); + + Self::compute_fee_details(len, &dispatch_info, tip) + } + /// Compute the final fee value for a particular transaction. 
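
With the runtime api declared and the pallet helpers in place, a runtime can quote fees for a bare `Call` without constructing a signed extrinsic. The fragment below sketches the intended flow inside a runtime test; it mirrors the `query_call_info_and_fee_details_works` test further down rather than quoting it, and assumes `dest` and `value` are in scope:

    use codec::Encode;

    let call = Call::Balances(BalancesCall::transfer { dest, value });
    let len = call.encode().len() as u32;

    // Dispatch class, weight and partial fee, derived from the call alone.
    let info = TransactionPayment::query_call_info(call.clone(), len);
    let details = TransactionPayment::query_call_fee_details(call, len);

    // The itemised breakdown sums to the quoted partial fee (tip is zero here).
    assert_eq!(details.final_fee(), info.partial_fee);
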
pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf where @@ -1206,6 +1232,43 @@ mod tests { }); } + #[test] + fn query_call_info_and_fee_details_works() { + let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let info = call.get_dispatch_info(); + let encoded_call = call.encode(); + let len = encoded_call.len() as u32; + + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * + 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); + } + #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() From 1fd71c7845d6c28c532795ec79106d959dd1fe30 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 10 Aug 2022 14:58:52 +0100 Subject: [PATCH 117/135] Use `#[pallet::unbounded]` tag in FRAME System (#11946) * use unbounded in system * update ui tests --- frame/sudo/src/mock.rs | 12 ++++++------ frame/support/src/weights.rs | 4 ++-- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/lib.rs | 15 ++++++++++----- 5 files changed, 20 insertions(+), 15 deletions(-) diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 2e2a4abafcd98..71f0f26b1a1d5 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -44,7 +44,6 @@ pub mod logger { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(PhantomData); #[pallet::call] @@ -57,7 +56,7 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is `Root`. ensure_root(origin)?; - >::append(i); + >::try_append(i).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32 { value: i, weight }); Ok(().into()) } @@ -70,8 +69,8 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is some signed account. 
let sender = ensure_signed(origin)?; - >::append(i); - >::append(sender.clone()); + >::try_append(i).map_err(|_| "could not append")?; + >::try_append(sender.clone()).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32AndAccount { sender, value: i, weight }); Ok(().into()) } @@ -86,11 +85,12 @@ pub mod logger { #[pallet::storage] #[pallet::getter(fn account_log)] - pub(super) type AccountLog = StorageValue<_, Vec, ValueQuery>; + pub(super) type AccountLog = + StorageValue<_, BoundedVec>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn i32_log)] - pub(super) type I32Log = StorageValue<_, Vec, ValueQuery>; + pub(super) type I32Log = StorageValue<_, BoundedVec>, ValueQuery>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index c37a72536bddf..fb34484416063 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -128,7 +128,7 @@ use crate::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, traits::Get, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -676,7 +676,7 @@ where } /// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { /// Value for `Normal` extrinsics. normal: T, diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index c2ec8cf7f4d05..6c49e1220a3a5 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 72 others + and 75 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index dbbc426de2906..9a0731683a953 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 72 others + and 75 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for 
`frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 94605c2da59bd..12d415e4e2b42 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -233,7 +233,8 @@ pub mod pallet { + Default + MaybeDisplay + AtLeast32Bit - + Copy; + + Copy + + MaxEncodedLen; /// The block number type used by the runtime. type BlockNumber: Parameter @@ -320,7 +321,7 @@ pub mod pallet { /// Data to be associated with an account (other than nonce/transaction counter, which this /// pallet does regardless). - type AccountData: Member + FullCodec + Clone + Default + TypeInfo; + type AccountData: Member + FullCodec + Clone + Default + TypeInfo + MaxEncodedLen; /// Handler for when a new account has just been created. type OnNewAccount: OnNewAccount; @@ -355,7 +356,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::hooks] @@ -578,6 +578,7 @@ pub mod pallet { /// Extrinsics data for the current block (maps an extrinsic's index to its data). #[pallet::storage] #[pallet::getter(fn extrinsic_data)] + #[pallet::unbounded] pub(super) type ExtrinsicData = StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; @@ -593,6 +594,7 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; @@ -604,6 +606,7 @@ pub mod pallet { /// Events have a large in-memory size. Box the events to not go out-of-memory /// just in case someone still reads them from within the runtime. #[pallet::storage] + #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; @@ -623,12 +626,14 @@ pub mod pallet { /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn event_topics)] pub(super) type EventTopics = StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. #[pallet::storage] + #[pallet::unbounded] pub type LastRuntimeUpgrade = StorageValue<_, LastRuntimeUpgradeInfo>; /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. @@ -690,7 +695,7 @@ pub type Key = Vec; pub type KeyValue = (Vec, Vec); /// A phase of a block's execution. -#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub enum Phase { /// Applying an extrinsic. @@ -738,7 +743,7 @@ type EventIndex = u32; pub type RefCount = u32; /// Information of an account. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct AccountInfo { /// The number of transactions this account has sent. 
pub nonce: Index, From a1a9b475c354d873f91135ed5fa028ac9ef6a2c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 10 Aug 2022 22:27:01 +0200 Subject: [PATCH 118/135] transactional: Wrap `pallet::calls` directly in storage layers (#11927) * transactional: Wrap `pallet::calls` directly in storage layers Before this pr we only wrapped `pallet::calls` into storage layers when executing the calls with `dispatch`. This pr is solving that by wrapping each call function inside a storage layer. * Teach `BasicExternalities` transactions support * Fix crates * FMT * Fix benchmarking tests * Use correct span * Support old decl macros * Fix test * Apply suggestions from code review Co-authored-by: Oliver Tale-Yazdi * Update frame/state-trie-migration/src/lib.rs * Update frame/state-trie-migration/src/lib.rs * Update frame/state-trie-migration/src/lib.rs * Feedback * Apply suggestions from code review Co-authored-by: cheme Co-authored-by: Oliver Tale-Yazdi Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 128 ++++++------ .../procedural/src/pallet/expand/call.rs | 26 ++- .../procedural/src/pallet/parse/call.rs | 20 +- frame/support/src/dispatch.rs | 14 +- frame/support/test/tests/construct_runtime.rs | 178 ++++++++-------- frame/support/test/tests/storage_layers.rs | 2 + frame/transaction-storage/src/lib.rs | 4 +- primitives/state-machine/src/basic.rs | 192 +++++++----------- .../src/overlayed_changes/changeset.rs | 31 ++- .../src/overlayed_changes/mod.rs | 15 ++ 10 files changed, 316 insertions(+), 294 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 94f6c1f223b9c..353bc93e5ab94 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -235,7 +235,10 @@ pub mod pallet { /// reading a key, we simply cannot know how many bytes it is. In other words, this should /// not be used in any environment where resources are strictly bounded (e.g. a parachain), /// but it is acceptable otherwise (relay chain, offchain workers). - pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) -> DispatchResult { + pub fn migrate_until_exhaustion( + &mut self, + limits: MigrationLimits, + ) -> Result<(), Error> { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); if limits.item.is_zero() || limits.size.is_zero() { @@ -262,7 +265,7 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// /// This function is *the* core of this entire pallet. - fn migrate_tick(&mut self) -> DispatchResult { + fn migrate_tick(&mut self) -> Result<(), Error> { match (&self.progress_top, &self.progress_child) { (Progress::ToStart, _) => self.migrate_top(), (Progress::LastKey(_), Progress::LastKey(_)) => { @@ -301,7 +304,7 @@ pub mod pallet { /// Migrate the current child key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_child(&mut self) -> DispatchResult { + fn migrate_child(&mut self) -> Result<(), Error> { use sp_io::default_child_storage as child_io; let (maybe_current_child, child_root) = match (&self.progress_child, &self.progress_top) { @@ -350,7 +353,7 @@ pub mod pallet { /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. 
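
The observable effect of the wrapping described in this commit message is transactional dispatch: a call that returns `Err` can no longer leave partial storage writes behind, because its storage layer is rolled back as a unit. A minimal sketch of that guarantee, assuming a hypothetical `Value` storage item with a zero default; only `with_storage_layer` itself comes from `frame_support`:

    let result: DispatchResult = frame_support::storage::with_storage_layer(|| {
        Value::<T>::put(42); // written inside the layer...
        Err("revert".into()) // ...and undone again by this Err
    });
    assert!(result.is_err());
    assert_eq!(Value::<T>::get(), 0); // the put() never became visible
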
- fn migrate_top(&mut self) -> DispatchResult { + fn migrate_top(&mut self) -> Result<(), Error> { let maybe_current_top = match &self.progress_top { Progress::LastKey(last_top) => { let maybe_top: Option> = @@ -431,7 +434,7 @@ pub mod pallet { /// The auto migration task finished. AutoMigrationFinished, /// Migration got halted due to an error or miss-configuration. - Halted, + Halted { error: Error }, } /// The outer Pallet struct. @@ -516,8 +519,9 @@ pub mod pallet { pub type SignedMigrationMaxLimits = StorageValue<_, MigrationLimits, OptionQuery>; #[pallet::error] + #[derive(Clone, PartialEq)] pub enum Error { - /// max signed limits not respected. + /// Max signed limits not respected. MaxSignedLimits, /// A key was longer than the configured maximum. /// @@ -529,12 +533,12 @@ pub mod pallet { KeyTooLong, /// submitter does not have enough funds. NotEnoughFunds, - /// bad witness data provided. + /// Bad witness data provided. BadWitness, - /// upper bound of size is exceeded, - SizeUpperBoundExceeded, /// Signed migration is not allowed because the maximum limit is not set yet. SignedMigrationNotAllowed, + /// Bad child root provided. + BadChildRoot, } #[pallet::call] @@ -617,7 +621,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Err(Error::::SizeUpperBoundExceeded.into()) + return Ok(().into()) } Self::deposit_event(Event::::Migrated { @@ -634,13 +638,10 @@ pub mod pallet { MigrationProcess::::put(task); let post_info = PostDispatchInfo { actual_weight, pays_fee: Pays::No }; - match migration { - Ok(_) => Ok(post_info), - Err(error) => { - Self::halt(&error); - Err(DispatchErrorWithPostInfo { post_info, error }) - }, + if let Err(error) = migration { + Self::halt(error); } + Ok(post_info) } /// Migrate the list of top keys by iterating each of them one by one. @@ -679,7 +680,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - Err("wrong witness data".into()) + Ok(().into()) } else { Self::deposit_event(Event::::Migrated { top: keys.len() as u32, @@ -741,12 +742,9 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); Self::deposit_event(Event::::Slashed { who, amount: deposit }); - Err(DispatchErrorWithPostInfo { - error: "bad witness".into(), - post_info: PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), - pays_fee: Pays::Yes, - }, + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), + pays_fee: Pays::Yes, }) } else { Self::deposit_event(Event::::Migrated { @@ -806,7 +804,7 @@ pub mod pallet { if let Some(limits) = Self::auto_limits() { let mut task = Self::migration_process(); if let Err(e) = task.migrate_until_exhaustion(limits) { - Self::halt(&e); + Self::halt(e); } let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size); @@ -849,10 +847,10 @@ pub mod pallet { } /// Put a stop to all ongoing migrations and logs an error. - fn halt(msg: &E) { - log!(error, "migration halted due to: {:?}", msg); + fn halt(error: Error) { + log!(error, "migration halted due to: {:?}", error); AutoLimits::::kill(); - Self::deposit_event(Event::::Halted); + Self::deposit_event(Event::::Halted { error }); } /// Convert a child root key, aka. 
"Child-bearing top key" into the proper format. @@ -871,7 +869,7 @@ pub mod pallet { fn transform_child_key_or_halt(root: &Vec) -> &[u8] { let key = Self::transform_child_key(root); if key.is_none() { - Self::halt("bad child root key"); + Self::halt(Error::::BadChildRoot); } key.unwrap_or_default() } @@ -961,8 +959,16 @@ mod benchmarks { frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_err() - ) + ).is_ok() + ); + + frame_system::Pallet::::assert_last_event( + ::Event::from(crate::Event::Slashed { + who: caller.clone(), + amount: T::SignedDepositBase::get() + .saturating_add(T::SignedDepositPerItem::get().saturating_mul(1u32.into())), + }).into(), + ); } verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); @@ -1005,7 +1011,7 @@ mod benchmarks { StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_err() + ).is_ok() ) } verify { @@ -1285,18 +1291,16 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - ), - Error::::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + ),); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1322,18 +1326,16 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - ), - Error::::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + )); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1484,7 +1486,7 @@ mod test { ..Default::default() } ), - Error::::BadWitness + Error::::BadWitness, ); // migrate all keys in a series of submissions @@ -1547,14 +1549,11 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. - frame_support::assert_err!( - StateTrieMigration::migrate_custom_top( - Origin::signed(1), - vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - correct_witness - 1, - ), - "wrong witness data" - ); + frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + correct_witness - 1, + ),); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); @@ -1584,13 +1583,12 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. 
- assert!(StateTrieMigration::migrate_custom_child( + frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( Origin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness - ) - .is_err()); + )); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index fe7589a8275d2..a9468451ad1d4 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -140,6 +140,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; + // Wrap all calls inside of storage layers + if let Some(syn::Item::Impl(item_impl)) = def + .call + .as_ref() + .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) + { + item_impl.items.iter_mut().for_each(|i| { + if let syn::ImplItem::Method(method) = i { + let block = &method.block; + method.block = syn::parse_quote! {{ + // We execute all dispatchable in a new storage layer, allowing them + // to return an error at any point, and undoing any storage changes. + #frame_support::storage::with_storage_layer(|| #block) + }}; + } + }); + } + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -267,12 +285,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); - // We execute all dispatchable in a new storage layer, allowing them - // to return an error at any point, and undoing any storage changes. - #frame_support::storage::with_storage_layer(|| { - <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) - .map(Into::into).map_err(Into::into) - }) + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into) }, )* Self::__Ignore(_, _) => { diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index d8a81d699b8c2..336e08c3d39b7 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -48,8 +48,8 @@ pub struct CallDef { pub docs: Vec, } -#[derive(Clone)] /// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` +#[derive(Clone)] pub struct CallVariantDef { /// Function name. pub name: syn::Ident, @@ -143,18 +143,18 @@ impl CallDef { index: usize, item: &mut syn::Item, ) -> syn::Result { - let item = if let syn::Item::Impl(item) = item { + let item_impl = if let syn::Item::Impl(item) = item { item } else { return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let instances = vec![ - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - helper::check_pallet_struct_usage(&item.self_ty)?, + helper::check_impl_gen(&item_impl.generics, item_impl.impl_token.span())?, + helper::check_pallet_struct_usage(&item_impl.self_ty)?, ]; - if let Some((_, _, for_)) = item.trait_ { + if let Some((_, _, for_)) = item_impl.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. 
}`"; return Err(syn::Error::new(for_.span(), msg)) @@ -163,8 +163,8 @@ impl CallDef { let mut methods = vec![]; let mut indices = HashMap::new(); let mut last_index: Option = None; - for impl_item in &mut item.items { - if let syn::ImplItem::Method(method) = impl_item { + for item in &mut item_impl.items { + if let syn::ImplItem::Method(method) = item { if !matches!(method.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::call, dispatchable function must be public: \ `pub fn`"; @@ -290,7 +290,7 @@ impl CallDef { }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)) } } @@ -299,8 +299,8 @@ impl CallDef { attr_span, instances, methods, - where_clause: item.generics.where_clause.clone(), - docs: get_doc_literals(&item.attrs), + where_clause: item_impl.generics.where_clause.clone(), + docs: get_doc_literals(&item_impl.attrs), }) } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 2b05478e0b02a..fb0b658cd9303 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1787,9 +1787,11 @@ macro_rules! decl_module { $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - { $( $impl )* } - Ok(()) + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + { $( $impl )* } + Ok(()) + }) } }; @@ -1805,8 +1807,10 @@ macro_rules! decl_module { ) => { $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - $( $impl )* + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + $( $impl )* + }) } }; diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 63747a9d560dc..d388127f29abd 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -282,94 +282,96 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 31, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 32, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 33, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - NestedModule3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 34, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 6, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_4::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 3, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_5::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 4, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - 
Module1_6::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 1, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_7::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 2, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_8::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 12, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_9::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 13, - error: [0; 4], - message: Some("Something") - })), - ); + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( + Module1_1::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_8::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); + }); } #[test] diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 05ed60fe90196..7404ccace2c09 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -276,5 +276,7 @@ fn storage_layer_in_decl_pallet_call() { let call2 = Call::DeclPallet(decl_pallet::Call::set_value { value: 1 }); assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); + // Calling the function directly also works with storage layers. 
+ assert_noop!(decl_pallet::Module::::set_value(Origin::signed(1), 1), "Revert!"); }); } diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index f16b8f029662b..681cd29af8222 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -245,9 +245,11 @@ pub mod pallet { let sender = ensure_signed(origin)?; let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; + let extrinsic_index = + >::extrinsic_index().ok_or(Error::::BadContext)?; + Self::apply_fee(sender, info.size)?; - let extrinsic_index = >::extrinsic_index().unwrap(); sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 6efc847bfbdb7..236a515a2412d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -17,14 +17,13 @@ //! Basic implementation for Externalities. -use crate::{Backend, StorageKey, StorageValue}; +use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, StorageChild, - TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, TrackedStorageKey, }, traits::Externalities, Blake2Hasher, @@ -35,20 +34,19 @@ use std::{ any::{Any, TypeId}, collections::BTreeMap, iter::FromIterator, - ops::Bound, }; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - inner: Storage, + overlay: OverlayedChanges, extensions: Extensions, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } + BasicExternalities { overlay: inner.into(), extensions: Default::default() } } /// New basic externalities with empty storage. @@ -57,13 +55,34 @@ impl BasicExternalities { } /// Insert key/value - pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { - self.inner.top.insert(k, v) + pub fn insert(&mut self, k: StorageKey, v: StorageValue) { + self.overlay.set_storage(k, Some(v)); } /// Consume self and returns inner storages pub fn into_storages(self) -> Storage { - self.inner + Storage { + top: self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + children_default: self + .overlay + .children() + .map(|(iter, i)| { + ( + i.storage_key().to_vec(), + sp_core::storage::StorageChild { + data: iter + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + child_info: i.clone(), + }, + ) + }) + .collect(), + } } /// Execute the given closure `f` with the externalities set and initialized with `storage`. 
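
Re-basing `BasicExternalities` on `OverlayedChanges`, as the hunks below do, is what later turns the `unimplemented!()` transaction stubs into real delegations. Roughly, the type gains behaviour like this (a sketch against the traits used in this file, not code from the patch):

    use sp_core::traits::Externalities;
    use sp_state_machine::BasicExternalities;

    let mut ext = BasicExternalities::default();

    ext.storage_start_transaction();
    ext.place_storage(b"key".to_vec(), Some(b"value".to_vec()));

    // Previously this panicked; now it simply unwinds the overlay.
    ext.storage_rollback_transaction().unwrap();
    assert_eq!(ext.storage(b"key"), None);
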
@@ -73,13 +92,7 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let mut ext = Self { - inner: Storage { - top: std::mem::take(&mut storage.top), - children_default: std::mem::take(&mut storage.children_default), - }, - extensions: Default::default(), - }; + let mut ext = Self::new(std::mem::take(storage)); let r = ext.execute_with(f); @@ -108,15 +121,26 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) && - self.inner.children_default.eq(&other.inner.children_default) + self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == + other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + self.overlay + .children() + .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .collect::>() == + other + .overlay + .children() + .map(|(iter, i)| { + (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + }) + .collect::>() } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { fn from_iter>(iter: I) -> Self { let mut t = Self::default(); - t.inner.top.extend(iter); + iter.into_iter().for_each(|(k, v)| t.insert(k, v)); t } } @@ -128,11 +152,8 @@ impl Default for BasicExternalities { } impl From> for BasicExternalities { - fn from(hashmap: BTreeMap) -> Self { - BasicExternalities { - inner: Storage { top: hashmap, children_default: Default::default() }, - extensions: Default::default(), - } + fn from(map: BTreeMap) -> Self { + Self::from_iter(map.into_iter()) } } @@ -140,7 +161,7 @@ impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} fn storage(&self, key: &[u8]) -> Option { - self.inner.top.get(key).cloned() + self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } fn storage_hash(&self, key: &[u8]) -> Option> { @@ -148,11 +169,7 @@ impl Externalities for BasicExternalities { } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.get(key)) - .cloned() + self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { @@ -160,16 +177,13 @@ impl Externalities for BasicExternalities { } fn next_storage_key(&self, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() + self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) + self.overlay + .child_iter_after(child_info.storage_key(), key) + .find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { @@ -178,14 +192,7 @@ impl Externalities for BasicExternalities { return } - match maybe_value { - Some(value) => { - self.inner.top.insert(key, value); - }, - None => { - self.inner.top.remove(&key); - }, - } + self.overlay.set_storage(key, maybe_value) } fn place_child_storage( @@ -194,19 +201,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = 
self - .inner - .children_default - .entry(child_info.storage_key().to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }); - if let Some(value) = value { - child_map.data.insert(key, value); - } else { - child_map.data.remove(&key); - } + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( @@ -215,12 +210,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self - .inner - .children_default - .remove(child_info.storage_key()) - .map(|c| c.data.len()) - .unwrap_or(0) as u32; + let count = self.overlay.clear_child_storage(child_info); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -239,19 +229,7 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let to_remove = self - .inner - .top - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - self.inner.top.remove(&key); - } + let count = self.overlay.clear_prefix(prefix); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -262,56 +240,37 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { - let to_remove = child - .data - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - child.data.remove(&key); - } - MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } - } else { - MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } - } + let count = self.overlay.clear_child_prefix(child_info, prefix); + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } fn storage_append(&mut self, key: Vec, value: Vec) { - let current = self.inner.top.entry(key).or_default(); - crate::ext::StorageAppend::new(current).append(value); + let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); + crate::ext::StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { - let mut top = self.inner.top.clone(); - let prefixed_keys: Vec<_> = self - .inner - .children_default - .iter() - .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) - .collect(); + let mut top = self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) + .collect::>(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for (prefixed_storage_key, child_info) in prefixed_keys { + for child_info in self.overlay.children().map(|d| d.1.clone()).collect::>() { let child_root = self.child_storage_root(&child_info, state_version); if empty_hash[..] == child_root[..] 
{ - top.remove(prefixed_storage_key.as_slice()); + top.remove(child_info.prefixed_storage_key().as_slice()); } else { - top.insert(prefixed_storage_key.into_inner(), child_root); + top.insert(child_info.prefixed_storage_key().into_inner(), child_root); } } match state_version { - StateVersion::V0 => - LayoutV0::::trie_root(self.inner.top.clone()).as_ref().into(), - StateVersion::V1 => - LayoutV1::::trie_root(self.inner.top.clone()).as_ref().into(), + StateVersion::V0 => LayoutV0::::trie_root(top).as_ref().into(), + StateVersion::V1 => LayoutV1::::trie_root(top).as_ref().into(), } } @@ -320,10 +279,11 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { - let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); + if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + let delta = + data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::>() - .child_storage_root(&child.child_info, delta, state_version) + .child_storage_root(&child_info, delta, state_version) .0 } else { empty_child_trie_root::>() @@ -332,15 +292,15 @@ impl Externalities for BasicExternalities { } fn storage_start_transaction(&mut self) { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.start_transaction() } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.rollback_transaction().map_err(drop) } fn storage_commit_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.commit_transaction().map_err(drop) } fn wipe(&mut self) {} diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index ae5d47f6bb943..e5dad7157c731 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -57,7 +57,7 @@ pub struct NotInRuntime; /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { - /// Exeuting in client mode: Removal of all transactions possible. + /// Executing in client mode: Removal of all transactions possible. Client, /// Executing in runtime mode: Transactions started by the client are protected. Runtime, @@ -95,7 +95,7 @@ pub type OverlayedChangeSet = OverlayedMap>; /// Holds a set of changes with the ability modify them using nested transactions. #[derive(Debug, Clone)] -pub struct OverlayedMap { +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. changes: BTreeMap>, /// Stores which keys are dirty per transaction. 
Needed in order to determine which @@ -110,7 +110,7 @@ pub struct OverlayedMap { execution_mode: ExecutionMode, } -impl Default for OverlayedMap { +impl Default for OverlayedMap { fn default() -> Self { Self { changes: BTreeMap::new(), @@ -121,6 +121,31 @@ impl Default for OverlayedMap { } } +#[cfg(feature = "std")] +impl From for OverlayedMap> { + fn from(storage: sp_core::storage::StorageMap) -> Self { + Self { + changes: storage + .into_iter() + .map(|(k, v)| { + ( + k, + OverlayedEntry { + transactions: SmallVec::from_iter([InnerValue { + value: Some(v), + extrinsics: Default::default(), + }]), + }, + ) + }) + .collect(), + dirty_keys: Default::default(), + num_client_transactions: 0, + execution_mode: ExecutionMode::Client, + } + } +} + impl Default for ExecutionMode { fn default() -> Self { Self::Client diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 746599a6768e6..001b4b656c34e 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -638,6 +638,21 @@ impl OverlayedChanges { } } +#[cfg(feature = "std")] +impl From for OverlayedChanges { + fn from(storage: sp_core::storage::Storage) -> Self { + Self { + top: storage.top.into(), + children: storage + .children_default + .into_iter() + .map(|(k, v)| (k, (v.data.into(), v.child_info))) + .collect(), + ..Default::default() + } + } +} + #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) where From 389cc11d8d836a28cd4462bf8f63a36ff794a976 Mon Sep 17 00:00:00 2001 From: yjh Date: Thu, 11 Aug 2022 17:59:38 +0800 Subject: [PATCH 119/135] chore: improve runtime docs and remove unused error (#12000) --- Cargo.lock | 1 - client/executor/common/Cargo.toml | 1 - client/executor/common/src/error.rs | 4 ---- client/executor/src/native_executor.rs | 11 +++++++---- primitives/runtime-interface/src/lib.rs | 4 ++-- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38063e8060835..a5cd455e6029b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8188,7 +8188,6 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", - "sp-serializer", "sp-wasm-interface", "thiserror", "wasm-instrument", diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 9ffdfb788474d..b16ed7e8552ce 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -23,7 +23,6 @@ wasmi = "0.9.1" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } -sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" } sp-wasm-interface = { version = "6.0.0", path = "../../../primitives/wasm-interface" } [features] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 09e070bb3bae5..376ac190bd7b7 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -18,7 +18,6 @@ //! Rust executor possible errors. -use sp_serializer; use wasmi; /// Result type alias. 
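
The new `From` conversions above give genesis storage a direct path into an overlay, which is how `BasicExternalities::new` is now able to work. An assumed usage, matching the impls shown:

    use sp_core::storage::Storage;
    use sp_state_machine::OverlayedChanges;

    let genesis = Storage {
        top: [(b"code".to_vec(), b"wasm".to_vec())].into_iter().collect(),
        children_default: Default::default(),
    };

    // Every pre-existing value becomes a client-mode change in the overlay.
    let overlay: OverlayedChanges = genesis.into();
    assert_eq!(overlay.storage(b"code"), Some(Some(&b"wasm"[..])));
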
@@ -28,9 +27,6 @@ pub type Result = std::result::Result; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { - #[error("Unserializable data encountered")] - InvalidData(#[from] sp_serializer::Error), - #[error(transparent)] Wasmi(#[from] wasmi::Error), diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index d805949d26fd9..4d8f559d700b5 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -130,14 +130,13 @@ where /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. /// - /// `host_functions` - The set of host functions to be available for import provided by this - /// executor. - /// /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. /// /// `cache_path` - A path to a directory where the executor can place its files for purposes of /// caching. This may be important in cases when there are many different modules with the /// compiled execution method is used. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, @@ -209,7 +208,7 @@ where /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the /// parameters this `WasmExecutor` was initialized with. /// - /// In case of problems with during creation of the runtime or instantation, a `Err` is + /// In case of problems with during creation of the runtime or instantiation, a `Err` is /// returned. that describes the message. #[doc(hidden)] // We use this function for tests across multiple crates. pub fn uncached_call( @@ -405,6 +404,10 @@ impl NativeElseWasmExecutor { /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// + /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( fallback_method: WasmExecutionMethod, default_heap_pages: Option, diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index f9bf8825f9486..6ebcb7482a779 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -308,10 +308,10 @@ pub use sp_std; /// /// 1. The generated functions are not callable from the native side. /// 2. The trait as shown above is not implemented for [`Externalities`] and is instead -/// implemented for `FunctionExecutor` (from `sp-wasm-interface`). +/// implemented for `FunctionContext` (from `sp-wasm-interface`). /// /// # Disable tracing -/// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from +/// By adding `no_tracing` to the list of options you can prevent the wasm-side interface from /// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant /// for the case when that would create a circular dependency. 
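To make the option concrete, it is passed in the attribute's option
list, roughly like the sketch below (illustrative only, not part of
this diff; the `Silent` interface is made up):

```
use sp_runtime_interface::runtime_interface;

// Hypothetical interface, shown only to illustrate where `no_tracing`
// goes. With the option set, the generated wasm-side stubs skip the
// default `sp-tracing` span around each call.
#[runtime_interface(no_tracing)]
trait Silent {
    fn ping() {
        // Host-side implementation; no tracing calls are emitted.
    }
}
```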
You usually _do not_ want to add /// this flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) From 6cd7ac7bd1e9da914adb7c8066d759f0a2d3849f Mon Sep 17 00:00:00 2001 From: lucasvanmol Date: Thu, 11 Aug 2022 06:13:49 -0700 Subject: [PATCH 120/135] Fix broken links in `polkadot --help` (#12010) https://docs.substrate.io/v3/runtime/custom-rpcs/#public-rpcs > https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs --- client/cli/src/commands/run_cmd.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 3a74fdd9700f2..fd236aa0211dd 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -54,7 +54,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[clap(long)] pub rpc_external: bool, @@ -85,7 +85,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[clap(long)] pub ws_external: bool, From e3512480d5e6ff527b1f82c2b47618aaa824c9bb Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 11 Aug 2022 15:39:56 +0100 Subject: [PATCH 121/135] add decode with depth limit to opaque types (#11947) Co-authored-by: parity-processbot <> --- Cargo.lock | 1 + frame/support/Cargo.toml | 2 ++ frame/support/src/traits/misc.rs | 33 +++++++++++++++++++++++++++++--- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a5cd455e6029b..2db192dcbf1a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2250,6 +2250,7 @@ dependencies = [ "serde", "serde_json", "smallvec", + "sp-api", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index ca26d3a5e32f2..a69133961e970 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -17,6 +17,7 @@ serde = { version = "1.0.136", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-metadata = { version = "15.0.0", default-features = false, features = ["v14"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } @@ -49,6 +50,7 @@ default = ["std"] std = [ "once_cell", "serde", + "sp-api/std", "sp-io/std", "codec/std", "scale-info/std", diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index ddb7f6f41b378..fa59fa92f920d 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,7 +18,7 @@ //! Smaller traits used in FRAME which don't need their own file. 
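The hunk below swaps the plain `decode_all` calls for depth-limited
decoding. As a rough standalone sketch of what the depth limit guards
against (illustrative only; it mirrors the `NestedType` test added at
the end of this hunk):

```
use codec::{Decode, DecodeLimit, Encode};

#[derive(Encode, Decode)]
enum Nested {
    Nested(Box<Nested>),
    Done,
}

fn main() {
    // Each leading `0` byte encodes one more `Nested::Nested` level and
    // the trailing `1` byte terminates the chain with `Nested::Done`.
    let mut deep = vec![0u8; 10_000];
    deep.push(1);
    // Plain decoding recurses once per level and can exhaust the stack
    // on attacker-controlled input; the depth limit fails gracefully.
    assert!(Nested::decode_all_with_depth_limit(256, &mut &deep[..]).is_err());
}
```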
use crate::dispatch::Parameter; -use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; +use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; #[doc(hidden)] @@ -745,7 +745,10 @@ impl Encode for WrapperOpaque { impl Decode for WrapperOpaque { fn decode(input: &mut I) -> Result { - Ok(Self(T::decode_all(&mut &>::decode(input)?[..])?)) + Ok(Self(T::decode_all_with_depth_limit( + sp_api::MAX_EXTRINSIC_DEPTH, + &mut &>::decode(input)?[..], + )?)) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -807,7 +810,7 @@ impl WrapperKeepOpaque { /// /// Returns `None` if the decoding failed. pub fn try_decode(&self) -> Option { - T::decode_all(&mut &self.data[..]).ok() + T::decode_all_with_depth_limit(sp_api::MAX_EXTRINSIC_DEPTH, &mut &self.data[..]).ok() } /// Returns the length of the encoded `T`. @@ -939,6 +942,30 @@ impl PreimageRecipient for () { #[cfg(test)] mod test { use super::*; + use sp_std::marker::PhantomData; + + #[derive(Encode, Decode)] + enum NestedType { + Nested(Box), + Done, + } + + #[test] + fn test_opaque_wrapper_decode_limit() { + let limit = sp_api::MAX_EXTRINSIC_DEPTH as usize; + let mut ok_bytes = vec![0u8; limit]; + ok_bytes.push(1u8); + let mut err_bytes = vec![0u8; limit + 1]; + err_bytes.push(1u8); + assert!(>::decode(&mut &ok_bytes.encode()[..]).is_ok()); + assert!(>::decode(&mut &err_bytes.encode()[..]).is_err()); + + let ok_keep_opaque = WrapperKeepOpaque { data: ok_bytes, _phantom: PhantomData }; + let err_keep_opaque = WrapperKeepOpaque { data: err_bytes, _phantom: PhantomData }; + + assert!(>::try_decode(&ok_keep_opaque).is_some()); + assert!(>::try_decode(&err_keep_opaque).is_none()); + } #[test] fn test_opaque_wrapper() { From dc45cc29167ee6358b527f3a0bcc0617dc9e4c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 11 Aug 2022 16:53:40 +0200 Subject: [PATCH 122/135] Use correct length for Pallet and Storage prefix when calculating `KeyLenOf` (#11994) * Use correct length for Pallet and Storage prefix when calculating `KeyLenOf` * Add tests Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- frame/support/src/storage/types/double_map.rs | 19 ++++++++++--- frame/support/src/storage/types/map.rs | 27 +++++++++++++++++-- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 2e090d30119aa..9ba4cf052e82a 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -91,10 +92,10 @@ impl Key2: MaxEncodedLen, { fn get() -> u32 { - let z = Hasher1::max_len::() + - Hasher2::max_len::() + - Prefix::pallet_prefix().len() + - Prefix::STORAGE_PREFIX.len(); + // The `max_len` of both key hashes plus the pallet prefix and storage prefix (which both + // are hashed with `Twox128`). 
+ let z = + Hasher1::max_len::() + Hasher2::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -755,6 +756,16 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat and Twox64Concat. + type A = StorageDoubleMap; + let size = 16 * 2 // Two Twox128 + + 16 + 8 // Blake2_128Concat = hash + key + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index f4ac83c22663b..0f89e2378a55d 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -61,8 +62,9 @@ where Key: FullCodec + MaxEncodedLen, { fn get() -> u32 { - let z = - Hasher::max_len::() + Prefix::pallet_prefix().len() + Prefix::STORAGE_PREFIX.len(); + // The `max_len` of the key hash plus the pallet prefix and storage prefix (which both are + // hashed with `Twox128`). + let z = Hasher::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -501,6 +503,27 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat. + type A = StorageMap; + let size = 16 * 2 // Two Twox128 + + 16 + 4; // Blake2_128Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + + // Works with Blake2_256. + type B = StorageMap; + let size = 16 * 2 // Two Twox128 + + 32; // Blake2_256 + assert_eq!(KeyLenOf::::get(), size); + + // Works with Twox64Concat. + type C = StorageMap; + let size = 16 * 2 // Two Twox128 + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = StorageMap; From 469189bfe16b61b238f7786c5d2cff82060ec8f8 Mon Sep 17 00:00:00 2001 From: benluelo <57334811+benluelo@users.noreply.github.com> Date: Thu, 11 Aug 2022 14:55:30 -0400 Subject: [PATCH 123/135] Add map and try_map methods to BoundedBTreeMap (#11869) * Add map and try_map methods to BoundedBTreeMap * Undo changes to basic_authorship.rs * Remove unwrap and use unchecked_from instead * Add iter_mut() method * Remove map functions and add docs to iter_mut * fmt Co-authored-by: Shawn Tabrizi --- .../runtime/src/bounded/bounded_btree_map.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/primitives/runtime/src/bounded/bounded_btree_map.rs b/primitives/runtime/src/bounded/bounded_btree_map.rs index aefd168632a1e..725864b462694 100644 --- a/primitives/runtime/src/bounded/bounded_btree_map.rs +++ b/primitives/runtime/src/bounded/bounded_btree_map.rs @@ -161,6 +161,13 @@ where { self.0.remove_entry(key) } + + /// Gets a mutable iterator over the entries of the map, sorted by key. + /// + /// See [`BTreeMap::iter_mut`] for more information. 
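A possible usage of the method defined just below (hypothetical sketch,
not part of the diff; the import paths and the `ConstU32<8>` bound are
assumptions):

```
use sp_core::ConstU32;
use sp_runtime::BoundedBTreeMap;

// `iter_mut` hands out `(&K, &mut V)`: values can be mutated in place,
// keys cannot, and the number of entries (and with it the bound) can
// never change.
fn double_values(map: &mut BoundedBTreeMap<u32, u32, ConstU32<8>>) {
    for (_key, value) in map.iter_mut() {
        *value = value.saturating_mul(2);
    }
}
```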
+ pub fn iter_mut(&mut self) -> sp_std::collections::btree_map::IterMut { + self.0.iter_mut() + } } impl Default for BoundedBTreeMap @@ -508,7 +515,7 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); - // but these worn't work + // but these won't work let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); assert!(b2.is_err()); @@ -517,4 +524,17 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); assert!(b2.is_err()); } + + #[test] + fn test_iter_mut() { + let mut b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + + b1.iter_mut().for_each(|(_, v)| *v *= 2); + + assert_eq!(b1, b2); + } } From 6b8553511112afd5ae7e8e6877dc2f467850f155 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 12 Aug 2022 11:03:27 +0200 Subject: [PATCH 124/135] [ci] Improve cancel-pipeline job (#12008) * [WIP][ci] Improve cancel-pipeline job * fix job name * test that fail works * debug cancel-pipeline * remove artifacts-false from cancel-pipeline jobs * split cancel pipeline jobs * fail test-linux-stable 2/3 * fail test-linux-stable 3/3 * fail cargo-check-benches 1/2 * fail cargo-check-benches 2/2 * fail test-linux-stable-int * fail cargo-check-subkey * fail check-tracing * fail check-tracing * fix pipeline --- .gitlab-ci.yml | 60 ++++++++++++++++++++++------- scripts/ci/gitlab/pipeline/test.yml | 6 +++ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9608d88e9554d..765112bfa9e62 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -82,18 +82,31 @@ default: tags: - kubernetes-parity-build -.rust-info-script: &rust-info-script +.rust-info-script: script: - rustup show - cargo --version - rustup +nightly show - cargo +nightly --version +.pipeline-stopper-vars: + script: + - echo "Collecting env variables for the cancel-pipeline job" + - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env + - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env + - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env + +.pipeline-stopper-artifacts: + artifacts: + reports: + dotenv: pipeline-stopper.env + .docker-env: image: "${CI_IMAGE}" before_script: - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] after_script: - !reference [.rusty-cachier, after_script] tags: @@ -246,35 +259,54 @@ rusty-cachier-notify: when: on_failure variables: PROJECT_ID: "${CI_PROJECT_ID}" + PROJECT_NAME: "${CI_PROJECT_NAME}" PIPELINE_ID: "${CI_PIPELINE_ID}" - trigger: "parity/infrastructure/ci_cd/pipeline-stopper" + FAILED_JOB_URL: "${FAILED_JOB_URL}" + FAILED_JOB_NAME: "${FAILED_JOB_NAME}" + PR_NUM: "${PR_NUM}" + trigger: + project: "parity/infrastructure/ci_cd/pipeline-stopper" + # remove branch, when pipeline-stopper for polakdot is updated to the same branch + branch: "as-improve" -cancel-pipeline-test-linux-stable: +# need to copy jobs this way because otherwise gitlab will wait +# for all 3 jobs to finish instead of cancelling if one fails +cancel-pipeline-test-linux-stable1: extends: .cancel-pipeline-template needs: - - job: test-linux-stable - artifacts: false + - job: "test-linux-stable 1/3" + +cancel-pipeline-test-linux-stable2: + extends: 
.cancel-pipeline-template + needs: + - job: "test-linux-stable 2/3" + +cancel-pipeline-test-linux-stable3: + extends: .cancel-pipeline-template + needs: + - job: "test-linux-stable 3/3" + +cancel-pipeline-cargo-check-benches1: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 1/2" + +cancel-pipeline-cargo-check-benches2: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 2/2" cancel-pipeline-test-linux-stable-int: extends: .cancel-pipeline-template needs: - job: test-linux-stable-int - artifacts: false cancel-pipeline-cargo-check-subkey: extends: .cancel-pipeline-template needs: - job: cargo-check-subkey - artifacts: false - -cancel-pipeline-cargo-check-benches: - extends: .cancel-pipeline-template - needs: - - job: cargo-check-benches - artifacts: false cancel-pipeline-check-tracing: extends: .cancel-pipeline-template needs: - job: check-tracing - artifacts: false diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index ccf8338236d0a..a58015c6e22d6 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -61,10 +61,12 @@ cargo-check-benches: - .docker-env - .test-refs - .collect-artifacts + - .pipeline-stopper-artifacts before_script: # perform rusty-cachier operations before any further modifications to the git repo to make cargo feel cheated not so much - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs - if [ $CI_COMMIT_REF_NAME != "master" ]; then git fetch origin +master:master; @@ -131,6 +133,7 @@ cargo-check-subkey: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create - cd ./bin/utils/subkey @@ -199,6 +202,7 @@ test-linux-stable: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -299,6 +303,7 @@ test-linux-stable-int: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -328,6 +333,7 @@ check-tracing: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases From 241fc1f8a7e367420ee8c697a9a85b7ceb78713d Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 12 Aug 2022 16:34:00 +0100 Subject: [PATCH 125/135] better benchmark log (#12016) * better benchmark log * Update lib.rs --- frame/benchmarking/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index afd53915cc397..a1399dc388b09 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1054,7 +1054,9 @@ macro_rules! impl_benchmark { // Time the extrinsic logic. 
$crate::log::trace!( target: "benchmark", - "Start Benchmark: {:?}", c + "Start Benchmark: {} ({:?})", + extrinsic, + c ); let start_pov = $crate::benchmarking::proof_size(); From bfbbf4c7fb1be8d2304b8da102c89fe286e70804 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 12 Aug 2022 17:39:05 +0200 Subject: [PATCH 126/135] feat(rpc middleware): use custom time buckets (#11950) * feat(rpc middleware): use custom time buckets * cargo fmt * make it compile again * fix bad comment --- client/rpc-servers/src/middleware.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 1c0660fc3528d..0d77442323241 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -25,6 +25,21 @@ use prometheus_endpoint::{ }; use std::net::SocketAddr; +/// Histogram time buckets in microseconds. +const HISTOGRAM_BUCKETS: [f64; 11] = [ + 5.0, + 25.0, + 100.0, + 500.0, + 1_000.0, + 2_500.0, + 10_000.0, + 25_000.0, + 100_000.0, + 1_000_000.0, + 10_000_000.0, +]; + /// Metrics for RPC middleware storing information about the number of requests started/completed, /// calls started/completed and their timings. #[derive(Debug, Clone)] @@ -75,7 +90,8 @@ impl RpcMetrics { HistogramOpts::new( "substrate_rpc_calls_time", "Total time [μs] of processed RPC calls", - ), + ) + .buckets(HISTOGRAM_BUCKETS.to_vec()), &["protocol", "method"], )?, metrics_registry, From 95a0c6a9d41628c3cc37f59dc42f5f45ca3f3977 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 12 Aug 2022 19:18:27 +0200 Subject: [PATCH 127/135] Add `defer` (#12013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add defer Signed-off-by: Oliver Tale-Yazdi * Convert to macro + review fixes Signed-off-by: Oliver Tale-Yazdi * Apply suggestions from code review Co-authored-by: Bastian Köcher * Move into new file and add panic unwind tests Signed-off-by: Oliver Tale-Yazdi * Fix doc and Clippy Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- primitives/core/src/defer.rs | 140 +++++++++++++++++++++++++++++++++++ primitives/core/src/lib.rs | 1 + 2 files changed, 141 insertions(+) create mode 100644 primitives/core/src/defer.rs diff --git a/primitives/core/src/defer.rs b/primitives/core/src/defer.rs new file mode 100644 index 0000000000000..d14b26d59e4dd --- /dev/null +++ b/primitives/core/src/defer.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the [`crate::defer!`] macro for *deferring* the execution +//! of code until the current scope is dropped. +//! This helps with *always* executing cleanup code. + +/// Executes the wrapped closure on drop. +/// +/// Should be used together with the [`crate::defer!`] macro. 
+#[must_use] +pub struct DeferGuard(pub Option); + +impl Drop for DeferGuard { + fn drop(&mut self) { + self.0.take().map(|f| f()); + } +} + +/// Executes the given code when the current scope is dropped. +/// +/// Multiple calls to [`crate::defer!`] will execute the passed codes in reverse order. +/// This also applies to panic stack unwinding. +/// +/// # Example +/// +/// ```rust +/// use sp_core::defer; +/// +/// let message = std::cell::RefCell::new("".to_string()); +/// { +/// defer!( +/// message.borrow_mut().push_str("world!"); +/// ); +/// defer!( +/// message.borrow_mut().push_str("Hello "); +/// ); +/// } +/// assert_eq!(*message.borrow(), "Hello world!"); +/// ``` +#[macro_export] +macro_rules! defer( + ( $( $code:tt )* ) => { + let _guard = $crate::defer::DeferGuard(Some(|| { $( $code )* })); + }; +); + +#[cfg(test)] +mod test { + #[test] + fn defer_guard_works() { + let mut called = false; + { + defer!( + called = true; + ); + } + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code in reverse order of being called. + fn defer_guard_order_works() { + let called = std::cell::RefCell::new(1); + + defer!( + assert_eq!(*called.borrow(), 3); + ); + defer!( + assert_eq!(*called.borrow(), 2); + *called.borrow_mut() = 3; + ); + defer!({ + assert_eq!(*called.borrow(), 1); + *called.borrow_mut() = 2; + }); + } + + #[test] + #[allow(unused_braces)] + #[allow(clippy::unnecessary_operation)] + fn defer_guard_syntax_works() { + let called = std::cell::RefCell::new(0); + { + defer!(*called.borrow_mut() += 1); + defer!(*called.borrow_mut() += 1;); // With ; + defer!({ *called.borrow_mut() += 1 }); + defer!({ *called.borrow_mut() += 1 };); // With ; + } + assert_eq!(*called.borrow(), 4); + } + + #[test] + /// `defer` executes the code even in case of a panic. + fn defer_guard_panic_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let mut called = false; + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(called = true); + panic!(); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code even in case another `defer` panics. + fn defer_guard_defer_panics_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let counter = std::cell::RefCell::new(0); + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(*counter.borrow_mut() += 1); + defer!( + *counter.borrow_mut() += 1; + panic!(); + ); + defer!(*counter.borrow_mut() += 1); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert_eq!(*counter.borrow(), 3, "DeferGuard should have executed the closure"); + } +} diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index ab3334f9e4f5a..f48adc274f524 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -56,6 +56,7 @@ pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_6 pub mod crypto; pub mod hexdisplay; +pub mod defer; pub mod ecdsa; pub mod ed25519; pub mod hash; From c1293d4adec3aac82de9620842ae7f3cc0c97915 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Sat, 13 Aug 2022 14:56:40 +0300 Subject: [PATCH 128/135] Runtime API versioning (#11779) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Runtime API versioning Related to issue #11577 Add support for multiple versions of a Runtime API. 
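For orientation, this is roughly how a node-side caller is expected to
consume such a versioned API (a hedged sketch only, not code from this
change; `Test` and `new_cool_function` are the illustrative names used
in the example further down, and the exact client bounds are assumed):

```
use sp_api::{ApiError, ApiExt, ProvideRuntimeApi};
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

// Hypothetical helper: calls the staging method only when the runtime
// reports a new enough version of the (illustrative) `Test` API.
fn call_if_supported<Block: BlockT, Client>(
    client: &Client,
    at: BlockId<Block>,
) -> Result<Option<u32>, ApiError>
where
    Client: ProvideRuntimeApi<Block>,
    Client::Api: Test<Block>,
{
    let api = client.runtime_api();
    // Which version of `Test` does the runtime at `at` implement?
    let version = api.api_version::<dyn Test<Block>>(&at)?.unwrap_or(1);
    if version >= 11 {
        api.new_cool_function(&at).map(Some)
    } else {
        // Runtime predates v11: the staging method is not there.
        Ok(None)
    }
}
```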
The purpose is to have one main version of the API, which is considered
stable, and multiple unstable (aka staging) ones.

How it works
===========
Some methods of the API trait can be tagged with the `#[api_version(N)]`
attribute, where N is a version number bigger than the main one. Let's
call them **staging methods** for brevity. The implementor of the API
decides which version to implement.

Example (from
https://github.com/paritytech/substrate/issues/11577#issuecomment-1145347025):
```
decl_runtime_apis! {
    #[api_version(10)]
    trait Test {
        fn something() -> Vec<u8>;

        #[api_version(11)]
        fn new_cool_function() -> u32;
    }
}
```
```
impl_runtime_apis! {
    #[api_version(11)]
    impl Test for Runtime {
        fn something() -> Vec<u8> { vec![1, 2, 3] }

        fn new_cool_function() -> u32 { 10 }
    }
}
```

Version safety checks (currently not implemented)
=================================================
By default in the API trait all staging methods have a default
implementation calling `unimplemented!()`. This is a problem because if
the developer wants to implement version 11 in the example above and
forgets to add `fn new_cool_function()` in `impl_runtime_apis!`, the
runtime will crash when the function is executed. Ideally a compilation
error should be generated in such cases.

TODOs
=====
Things not working well at the moment:
[ ] Version safety check
[ ] Integration tests of `primitives/api` are messed up a bit. More
specifically `primitives/api/test/tests/decl_and_impl.rs`
[ ] Integration test covering the new functionality.
[ ] Some duplicated code

* Update primitives/api/proc-macro/src/impl_runtime_apis.rs

Code review feedback and formatting

Co-authored-by: asynchronous rob

* Code review feedback

Applying suggestions from @bkchr

* fmt

* Apply suggestions from code review

Co-authored-by: Bastian Köcher

* Code review feedback
* dummy trait -> versioned trait
* Implement only versioned traits (not compiling)
* Remove native API calls (still not compiling)
* fmt
* Fix compilation
* Comments
* Remove unused code
* Remove native runtime tests
* Remove unused code
* Fix UI tests
* Code review feedback
* Code review feedback
* attribute_names -> common
* Rework `append_api_version`
* Code review feedback

* Apply suggestions from code review

Co-authored-by: Bastian Köcher

* Code review feedback
* Code review feedback
* Code review feedback
* Use type alias for the default trait - doesn't compile
* Fixes
* Better error for `method_api_ver < trait_api_version`
* fmt
* Rework how we call runtime functions
* Update UI tests
* Fix warnings
* Fix doctests

* Apply suggestions from code review

Co-authored-by: Bastian Köcher

* Apply suggestions from code review

Co-authored-by: Bastian Köcher

* Fix formatting and small compilation errors

* Update primitives/api/proc-macro/src/impl_runtime_apis.rs

Co-authored-by: Bastian Köcher
Co-authored-by: asynchronous rob
Co-authored-by: Bastian Köcher
Co-authored-by: Bastian Köcher
---
 primitives/api/proc-macro/src/common.rs       |  41 +
 .../api/proc-macro/src/decl_runtime_apis.rs   | 699 +++++++-----------
 .../api/proc-macro/src/impl_runtime_apis.rs   | 319 ++++----
 primitives/api/proc-macro/src/lib.rs          |   1 +
 .../proc-macro/src/mock_impl_runtime_apis.rs  | 134 +++-
 primitives/api/proc-macro/src/utils.rs        |  43 +-
 primitives/api/src/lib.rs                     |  82 +-
 primitives/api/test/tests/decl_and_impl.rs    |  39 +-
 primitives/api/test/tests/runtime_calls.rs    |  24 +-
 primitives/api/test/tests/trybuild.rs         |   1 +
 .../ui/impl_incorrect_method_signature.stderr |  43 --
 .../api/test/tests/ui/impl_missing_version.rs |  40 +
.../test/tests/ui/impl_missing_version.stderr | 14 + ...pi_version.rs => invalid_api_version_1.rs} | 0 ...on.stderr => invalid_api_version_1.stderr} | 4 +- .../tests/ui/invalid_api_version_2.stderr | 4 +- .../tests/ui/invalid_api_version_3.stderr | 4 +- .../test/tests/ui/invalid_api_version_4.rs | 8 + .../tests/ui/invalid_api_version_4.stderr | 5 + .../ui/method_ver_lower_than_trait_ver.rs | 9 + .../ui/method_ver_lower_than_trait_ver.stderr | 11 + .../test/tests/ui/missing_versioned_method.rs | 39 + .../tests/ui/missing_versioned_method.stderr | 8 + .../missing_versioned_method_multiple_vers.rs | 42 ++ ...sing_versioned_method_multiple_vers.stderr | 8 + .../ui/mock_advanced_block_id_by_value.rs | 2 +- .../ui/mock_advanced_block_id_by_value.stderr | 4 +- .../tests/ui/mock_advanced_missing_blockid.rs | 2 +- .../ui/mock_advanced_missing_blockid.stderr | 4 +- .../tests/ui/mock_only_self_reference.stderr | 74 +- .../tests/ui/positive_cases/default_impls.rs | 41 + ...reference_in_impl_runtime_apis_call.stderr | 43 -- 32 files changed, 937 insertions(+), 855 deletions(-) create mode 100644 primitives/api/proc-macro/src/common.rs create mode 100644 primitives/api/test/tests/ui/impl_missing_version.rs create mode 100644 primitives/api/test/tests/ui/impl_missing_version.stderr rename primitives/api/test/tests/ui/{invalid_api_version.rs => invalid_api_version_1.rs} (100%) rename primitives/api/test/tests/ui/{invalid_api_version.stderr => invalid_api_version_1.stderr} (65%) create mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.rs create mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.stderr create mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs create mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr create mode 100644 primitives/api/test/tests/ui/missing_versioned_method.rs create mode 100644 primitives/api/test/tests/ui/missing_versioned_method.stderr create mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs create mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr create mode 100644 primitives/api/test/tests/ui/positive_cases/default_impls.rs diff --git a/primitives/api/proc-macro/src/common.rs b/primitives/api/proc-macro/src/common.rs new file mode 100644 index 0000000000000..10887be613278 --- /dev/null +++ b/primitives/api/proc-macro/src/common.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2024 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// The ident used for the block generic parameter. +pub const BLOCK_GENERIC_IDENT: &str = "Block"; + +/// Unique identifier used to make the hidden includes unique for this macro. +pub const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; + +/// The `core_trait` attribute. +pub const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; +/// The `api_version` attribute. 
+/// +/// Is used to set the current version of the trait. +pub const API_VERSION_ATTRIBUTE: &str = "api_version"; +/// The `changed_in` attribute. +/// +/// Is used when the function signature changed between different versions of a trait. +/// This attribute should be placed on the old signature of the function. +pub const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; +/// The `renamed` attribute. +/// +/// Is used when a trait method was renamed. +pub const RENAMED_ATTRIBUTE: &str = "renamed"; +/// All attributes that we support in the declaration of a runtime api trait. +pub const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index b031c0f8bb1cc..aac4491720c34 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -16,11 +16,15 @@ // limitations under the License. use crate::utils::{ - extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, - generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, - generate_runtime_mod_name_for_trait, prefix_function_with_trait, - replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, + generate_hidden_includes, generate_runtime_mod_name_for_trait, parse_runtime_api_version, + prefix_function_with_trait, replace_wild_card_parameter_names, return_type_extract_type, + versioned_trait_name, AllowSelfRefInParameters, +}; + +use crate::common::{ + API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, + HIDDEN_INCLUDES_ID, RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, }; use proc_macro2::{Span, TokenStream}; @@ -33,36 +37,11 @@ use syn::{ parse_macro_input, parse_quote, spanned::Spanned, visit::{self, Visit}, - Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, - TraitBound, TraitItem, TraitItemMethod, Type, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, TraitBound, + TraitItem, TraitItemMethod, }; -use std::collections::HashMap; - -/// The ident used for the block generic parameter. -const BLOCK_GENERIC_IDENT: &str = "Block"; - -/// Unique identifier used to make the hidden includes unique for this macro. -const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; - -/// The `core_trait` attribute. -const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; -/// The `api_version` attribute. -/// -/// Is used to set the current version of the trait. -const API_VERSION_ATTRIBUTE: &str = "api_version"; -/// The `changed_in` attribute. -/// -/// Is used when the function signature changed between different versions of a trait. -/// This attribute should be placed on the old signature of the function. -const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; -/// The `renamed` attribute. -/// -/// Is used when a trait method was renamed. -const RENAMED_ATTRIBUTE: &str = "renamed"; -/// All attributes that we support in the declaration of a runtime api trait. 
-const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = - &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; +use std::collections::{BTreeMap, HashMap}; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -119,20 +98,6 @@ impl<'ast> Visit<'ast> for IsUsingBlock { } } -/// Visits the ast and checks if `Block` ident is used somewhere. -fn type_is_using_block(ty: &Type) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_type(ty); - visitor.result -} - -/// Visits the ast and checks if `Block` ident is used somewhere. -fn return_type_is_using_block(ty: &ReturnType) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_return_type(ty); - visitor.result -} - /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} @@ -146,146 +111,69 @@ impl Fold for ReplaceBlockWithNodeBlock { } } -/// Replace all occurrences of `Block` with `NodeBlock` -fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_fn_arg(&mut replace, fn_arg) -} - -/// Replace all occurrences of `Block` with `NodeBlock` -fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_return_type(&mut replace, return_type) -} - -/// Generate the functions that generate the native call closure for each trait method. -fn generate_native_call_generators(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some(&m.sig), - _ => None, - }); - - let mut result = Vec::new(); - let trait_ = &decl.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Auxiliary function that is used to convert between types that use different block types. - // The function expects that both are convertible by encoding the one and decoding the other. - result.push(quote!( - #[cfg(any(feature = "std", test))] - fn convert_between_block_types - #crate_::ApiError>( - input: &I, - map_error: F, - ) -> std::result::Result - { - ::decode_with_depth_limit( - #crate_::MAX_EXTRINSIC_DEPTH, - &mut &#crate_::Encode::encode(input)[..], - ).map_err(map_error) +/// Versioned API traits are used to catch missing methods when implementing a specific version of a +/// versioned API. They contain all non-versioned methods (aka stable methods) from the main trait +/// and all versioned methods for the specific version. This means that there is one trait for each +/// version mentioned in the trait definition. For example: +/// ```ignore +/// // The trait version implicitly is 1 +/// decl_runtime_apis!( +/// trait SomeApi { +/// fn method1(); // this is a 'stable method' +/// +/// #[api_version(2)] +/// fn method2(); +/// +/// #[api_version(2)] +/// fn method3(); +/// +/// #[api_version(3)] +/// fn method4(); +/// } +/// ); +/// ``` +/// This trait has got three different versions. The function below will generate the following +/// code: +/// ``` +/// trait SomeApiV1 { +/// // in V1 only the stable methods are required. The rest has got default implementations. +/// fn method1(); +/// } +/// +/// trait SomeApiV2 { +/// // V2 contains all methods from V1 and V2. V3 not required so they are skipped. +/// fn method1(); +/// fn method2(); +/// fn method3(); +/// } +/// +/// trait SomeApiV3 { +/// // And V3 contains all methods from the trait. 
+/// fn method1(); +/// fn method2(); +/// fn method3(); +/// fn method4(); +/// } +/// ``` +fn generate_versioned_api_traits( + api: ItemTrait, + methods: BTreeMap>, +) -> Vec { + let mut result = Vec::::new(); + for (version, _) in &methods { + let mut versioned_trait = api.clone(); + versioned_trait.ident = versioned_trait_name(&versioned_trait.ident, *version); + versioned_trait.items = Vec::new(); + // Add the methods from the current version and all previous one. Versions are sorted so + // it's safe to stop early. + for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { + versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); } - )); - - // Generate a native call generator for each function of the given trait. - for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(fn_, AllowSelfRefInParameters::No)?; - let trait_fn_name = &fn_.ident; - let function_name_str = fn_.ident.to_string(); - let fn_name = generate_native_call_generator_fn_name(&fn_.ident); - let output = return_type_replace_block_with_node_block(fn_.output.clone()); - let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); - - // Every type that is using the `Block` generic parameter, we need to encode/decode, - // to make it compatible between the runtime/node. - let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let param_name = quote!(#n).to_string(); - - quote!( - let #n: #t = convert_between_block_types( - &#n, - |e| #crate_::ApiError::FailedToConvertParameter { - function: #function_name_str, - parameter: #param_name, - error: e, - }, - )?; - ) - }); - // Same as for the input types, we need to check if we also need to convert the output, - // before returning it. - let output_conversion = if return_type_is_using_block(&fn_.output) { - quote!( - convert_between_block_types( - &res, - |e| #crate_::ApiError::FailedToConvertReturnValue { - function: #function_name_str, - error: e, - }, - ) - ) - } else { - quote!(Ok(res)) - }; - - let input_names = params.iter().map(|v| &v.0); - // If the type is using the block generic type, we will encode/decode it to make it - // compatible. To ensure that we forward it by ref/value, we use the value given by the - // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = - params.iter().map(|v| if type_is_using_block(&v.1) { v.2 } else { None }); - - // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect - // all the function inputs. - let fn_inputs = fn_ - .inputs - .iter() - .map(|v| fn_arg_replace_block_with_node_block(v.clone())) - .map(|v| match v { - FnArg::Typed(ref arg) => { - let mut arg = arg.clone(); - if let Type::Reference(ref mut r) = *arg.ty { - r.lifetime = Some(parse_quote!( 'a )); - } - FnArg::Typed(arg) - }, - r => r, - }); - - let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); - // We need to parse them again, to get an easy access to the actual parameters. - let impl_generics: Generics = parse_quote!( #impl_generics ); - let impl_generics_params = impl_generics.params.iter().map(|p| { - match p { - GenericParam::Type(ref ty) => { - let mut ty = ty.clone(); - ty.bounds.push(parse_quote!( 'a )); - GenericParam::Type(ty) - }, - // We should not see anything different than type params here. 
- r => r.clone(), - } - }); - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT - #(, #impl_generics_params)* - >( - #( #fn_inputs ),* - ) -> impl FnOnce() -> #output + 'a #where_clause { - move || { - #( #conversions )* - let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); - #output_conversion - } - } - )); + result.push(versioned_trait); } - Ok(quote!( #( #result )* )) + result } /// Try to parse the given `Attribute` as `renamed` attribute. @@ -323,126 +211,13 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { } } -/// Generate the functions that call the api at a given block for a given trait method. -fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), - _ => None, - }); - - let mut result = Vec::new(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate a native call generator for each function of the given trait. - for (attrs, fn_) in fns { - let trait_name = &decl.ident; - let trait_fn_name = prefix_function_with_trait(trait_name, &fn_.ident); - let fn_name = generate_call_api_at_fn_name(&fn_.ident); - - let attrs = remove_supported_attributes(&mut attrs.clone()); - - if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - return Err(Error::new( - fn_.span(), - format!( - "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE - ), - )) - } - - // We do not need to generate this function for a method that signature was changed. - if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue - } - - // Parse the renamed attributes. - let mut renames = Vec::new(); - if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { - let (old_name, version) = parse_renamed_attribute(a)?; - renames.push((version, prefix_function_with_trait(trait_name, &old_name))); - } - - renames.sort_by(|l, r| r.cmp(l)); - let (versions, old_names) = renames.into_iter().fold( - (Vec::new(), Vec::new()), - |(mut versions, mut old_names), (version, old_name)| { - versions.push(version); - old_names.push(old_name); - (versions, old_names) - }, - ); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - #[allow(clippy::too_many_arguments)] - pub fn #fn_name< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, - Block: #crate_::BlockT, - T: #crate_::CallApiAt, - >( - call_runtime_at: &T, - at: &#crate_::BlockId, - args: std::vec::Vec, - changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: &std::cell::RefCell< - #crate_::StorageTransactionCache - >, - native_call: std::option::Option, - context: #crate_::ExecutionContext, - recorder: &std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { - let version = call_runtime_at.runtime_version_at(at)?; - - #( - // Check if we need to call the function by an old name. 
- if version.apis.iter().any(|(s, v)| { - s == &ID && *v < #versions - }) { - let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { - at, - function: #old_names, - native_call: None, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - let ret = #crate_::CallApiAt::::call_api_at(call_runtime_at, params)?; - - return Ok(ret) - } - )* - - let params = #crate_::CallApiAtParams { - at, - function: #trait_fn_name, - native_call, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - #crate_::CallApiAt::::call_api_at(call_runtime_at, params) - } - )); - } - - Ok(quote!( #( #result )* )) -} - /// Generate the declaration of the trait for the runtime. fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut result = Vec::new(); for decl in decls { let mut decl = decl.clone(); + let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); @@ -450,30 +225,73 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); - let call_api_at_calls = generate_call_api_at_calls(&decl)?; - - // Remove methods that have the `changed_in` attribute as they are not required for the - // runtime anymore. - decl.items = decl - .items - .iter_mut() - .filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs) - .contains_key(CHANGED_IN_ATTRIBUTE) - { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - }, - r => Some(r.clone()), - }) - .collect(); + let trait_api_version = get_api_version(&found_attributes)?; + + let mut methods_by_version: BTreeMap> = BTreeMap::new(); + + // Process the items in the declaration. 
The filter_map function below does a lot of stuff + // because the method attributes are stripped at this point + decl.items.iter_mut().for_each(|i| match i { + TraitItem::Method(ref mut method) => { + let method_attrs = remove_supported_attributes(&mut method.attrs); + let mut method_version = trait_api_version; + // validate the api version for the method (if any) and generate default + // implementation for versioned methods + if let Some(version_attribute) = method_attrs.get(API_VERSION_ATTRIBUTE) { + method_version = match parse_runtime_api_version(version_attribute) { + Ok(method_api_ver) if method_api_ver < trait_api_version => { + let method_ver = method_api_ver.to_string(); + let trait_ver = trait_api_version.to_string(); + let mut err1 = Error::new( + version_attribute.span(), + format!( + "Method version `{}` is older than (or equal to) trait version `{}`.\ + Methods can't define versions older than the trait version.", + method_ver, + trait_ver, + ), + ); + + let err2 = match found_attributes.get(&API_VERSION_ATTRIBUTE) { + Some(attr) => Error::new(attr.span(), "Trait version is set here."), + None => Error::new( + decl_span, + "Trait version is not set so it is implicitly equal to 1.", + ), + }; + err1.combine(err2); + result.push(err1.to_compile_error()); + + trait_api_version + }, + Ok(method_api_ver) => method_api_ver, + Err(e) => { + result.push(e.to_compile_error()); + trait_api_version + }, + }; + } - let native_call_generators = generate_native_call_generators(&decl)?; + // Any method with the `changed_in` attribute isn't required for the runtime + // anymore. + if !method_attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + + // partition methods by api version + methods_by_version.entry(method_version).or_default().push(method.clone()); + } + }, + _ => (), + }); + + let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version); + + let main_api_ident = decl.ident.clone(); + let versioned_ident = &versioned_api_traits + .first() + .expect("There should always be at least one version.") + .ident; result.push(quote!( #[doc(hidden)] @@ -482,15 +300,13 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { pub mod #mod_name { use super::*; - #decl + #( #versioned_api_traits )* + + pub use #versioned_ident as #main_api_ident; pub #api_version pub #id - - #native_call_generators - - #call_api_at_calls } )); } @@ -509,18 +325,45 @@ struct ToClientSideDecl<'a> { } impl<'a> ToClientSideDecl<'a> { - fn fold_item_trait_items(&mut self, items: Vec) -> Vec { + /// Process the given [`ItemTrait`]. + fn process(mut self, decl: ItemTrait) -> ItemTrait { + let mut decl = self.fold_item_trait(decl); + + let block_id = self.block_id; + let crate_ = self.crate_; + + // Add the special method that will be implemented by the `impl_runtime_apis!` macro + // to enable functions to call into the runtime. + decl.items.push(parse_quote! { + /// !!INTERNAL USE ONLY!! 
+ #[doc(hidden)] + fn __runtime_api_internal_call_api_at( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError>; + }); + + decl + } +} + +impl<'a> ToClientSideDecl<'a> { + fn fold_item_trait_items( + &mut self, + items: Vec, + trait_generics_num: usize, + ) -> Vec { let mut result = Vec::new(); items.into_iter().for_each(|i| match i { TraitItem::Method(method) => { - let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); + let (fn_decl, fn_decl_ctx) = + self.fold_trait_item_method(method, trait_generics_num); result.push(fn_decl.into()); result.push(fn_decl_ctx.into()); - - if let Some(fn_impl) = fn_impl { - result.push(fn_impl.into()); - } }, r => result.push(r), }); @@ -531,20 +374,24 @@ impl<'a> ToClientSideDecl<'a> { fn fold_trait_item_method( &mut self, method: TraitItemMethod, - ) -> (TraitItemMethod, Option, TraitItemMethod) { + trait_generics_num: usize, + ) -> (TraitItemMethod, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context); - let fn_decl_ctx = self.create_method_decl_with_context(method); + let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); + let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num); - (fn_decl, fn_impl, fn_decl_ctx) + (fn_decl, fn_decl_ctx) } - fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { + fn create_method_decl_with_context( + &mut self, + method: TraitItemMethod, + trait_generics_num: usize, + ) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); @@ -552,52 +399,6 @@ impl<'a> ToClientSideDecl<'a> { fn_decl_ctx } - /// Takes the given method and creates a `method_runtime_api_impl` method that will be - /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl( - &mut self, - mut method: TraitItemMethod, - ) -> Option { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None - } - - let fn_sig = &method.sig; - let ret_type = return_type_extract_type(&fn_sig.output); - - // Get types and if the value is borrowed from all parameters. - // If there is an error, we push it as the block to the user. - let param_types = - match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { - Ok(res) => res - .into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - }, - }; - let name = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); - let block_id = self.block_id; - let crate_ = self.crate_; - - Some(parse_quote! 
{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; - }) - } - /// Takes the method declared by the user and creates the declaration we require for the runtime /// api client side. This method will call by default the `method_runtime_api_impl` for doing /// the actual call into the runtime. @@ -605,6 +406,7 @@ impl<'a> ToClientSideDecl<'a> { &mut self, mut method: TraitItemMethod, context: TokenStream, + trait_generics_num: usize, ) -> TraitItemMethod { let params = match extract_parameter_names_types_and_borrows( &method.sig, @@ -616,18 +418,42 @@ impl<'a> ToClientSideDecl<'a> { Vec::new() }, }; - let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); fold_fn_decl_for_client_side(&mut method.sig, self.block_id, self.crate_); - let name_impl = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); let crate_ = self.crate_; let found_attributes = remove_supported_attributes(&mut method.attrs); + + // Parse the renamed attributes. + let mut renames = Vec::new(); + for (_, a) in found_attributes.iter().filter(|a| a.0 == &RENAMED_ATTRIBUTE) { + match parse_renamed_attribute(a) { + Ok((old_name, version)) => { + renames.push((version, prefix_function_with_trait(&self.trait_, &old_name))); + }, + Err(e) => self.errors.push(e.to_compile_error()), + } + } + + renames.sort_by(|l, r| r.cmp(l)); + let (versions, old_names) = renames.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut versions, mut old_names), (version, old_name)| { + versions.push(version); + old_names.push(old_name); + (versions, old_names) + }, + ); + + // Generate the function name before we may rename it below to + // `function_name_before_version_{}`. + let function_name = prefix_function_with_trait(&self.trait_, &method.sig.ident); + // If the method has a `changed_in` attribute, we need to alter the method name to // `method_before_version_VERSION`. - let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { + match get_changed_in(&found_attributes) { Ok(Some(version)) => { // Make sure that the `changed_in` version is at least the current `api_version`. if get_api_version(self.found_attributes).ok() < Some(version) { @@ -646,47 +472,51 @@ impl<'a> ToClientSideDecl<'a> { ); method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); - - let panic = - format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), + Ok(None) => {}, Err(e) => { self.errors.push(e.to_compile_error()); - (quote!(unimplemented!()), quote!(None)) }, }; - let function_name = method.sig.ident.to_string(); + // The module where the runtime relevant stuff is declared. + let trait_name = &self.trait_; + let runtime_mod = generate_runtime_mod_name_for_trait(trait_name); + let underscores = (0..trait_generics_num).map(|_| quote!(_)); // Generate the default implementation that calls the `method_runtime_api_impl` method. method.default = Some(parse_quote! 
{ { - let runtime_api_impl_params_encoded = + let __runtime_api_impl_params_encoded__ = #crate_::Encode::encode(&( #( &#params ),* )); - self.#name_impl( + >::__runtime_api_internal_call_api_at( + self, __runtime_api_at_param__, #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - std::result::Result::map_err( - <#ret_type as #crate_::Decode>::decode(&mut &r[..]), - |err| #crate_::ApiError::FailedToDecodeReturnValue { - function: #function_name, - error: err, - } - ) - } + __runtime_api_impl_params_encoded__, + &|version| { + #( + // Check if we need to call the function by an old name. + if version.apis.iter().any(|(s, v)| { + s == &#runtime_mod::ID && *v < #versions + }) { + return #old_names + } + )* + + #function_name } ) + .and_then(|r| + std::result::Result::map_err( + <#ret_type as #crate_::Decode>::decode(&mut &r[..]), + |err| #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) + ) } }); @@ -714,37 +544,12 @@ impl<'a> Fold for ToClientSideDecl<'a> { // The client side trait is only required when compiling with the feature `std` or `test`. input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items); + input.items = self.fold_item_trait_items(input.items, input.generics.params.len()); fold::fold_item_trait(self, input) } } -/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. -fn parse_runtime_api_version(version: &Attribute) -> Result { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ), - )); - - match meta { - Meta::List(list) => - if list.nested.len() != 1 { - err - } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { - i.base10_parse() - } else { - err - }, - _ => err, - } -} - /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. 
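// As a rough sketch of how these IDs are consumed (helper name hypothetical): the
// generated `RUNTIME_API_VERSIONS` list pairs each 8-byte trait-name hash with a `u32`
// version, so client code can probe a runtime in the spirit of the
// `version.apis.iter().any(..)` check emitted above:
//
//     fn runtime_implements(version: &sp_version::RuntimeVersion, id: [u8; 8], at_least: u32) -> bool {
//         version.apis.iter().any(|(s, v)| s == &id && *v >= at_least)
//     }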
fn generate_runtime_api_id(trait_name: &str) -> TokenStream { @@ -821,16 +626,14 @@ fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { let mut errors = Vec::new(); let trait_ = decl.ident.clone(); - let decl = { - let mut to_client_side = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - trait_: &trait_, - }; - to_client_side.fold_item_trait(decl) - }; + let decl = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + trait_: &trait_, + } + .process(decl); let api_version = get_api_version(&found_attributes); diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 02ef37370ffeb..0087a1ad37ab9 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -17,13 +17,13 @@ use crate::utils::{ extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, - generate_hidden_includes, generate_method_runtime_api_impl_name, - generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, - prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, - RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, + versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, }; +use crate::common::API_VERSION_ATTRIBUTE; + use proc_macro2::{Span, TokenStream}; use quote::quote; @@ -33,8 +33,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, - TypePath, + Attribute, Ident, ImplItem, ItemImpl, Path, Signature, Type, TypePath, }; use std::collections::HashSet; @@ -105,8 +104,10 @@ fn generate_impl_calls( let mut impl_calls = Vec::new(); for impl_ in impls { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); + let impl_trait = extend_with_api_version(impl_trait, trait_api_ver); let impl_trait_ident = &impl_trait_path .segments .last() @@ -326,35 +327,6 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] impl> RuntimeApiImpl { - fn call_api_at< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - F: FnOnce( - &C, - &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::StorageTransactionCache>, - &std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, E>, - E, - >( - &self, - call_api_at: F, - ) -> std::result::Result<#crate_::NativeOrEncoded, E> { - if *std::cell::RefCell::borrow(&self.commit_on_success) { - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); - } - let res = call_api_at( - &self.call, - &self.changes, - &self.storage_transaction_cache, - &self.recorder, - ); - - self.commit_or_rollback(std::result::Result::is_ok(&res)); - res - } - fn commit_or_rollback(&self, commit: bool) { let proof = "\ 
We only close a transaction when we opened one ourself. @@ -398,6 +370,24 @@ fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { trait_ } +fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { + let version = if let Some(v) = version { + v + } else { + // nothing to do + return trait_ + }; + + let trait_name = &mut trait_ + .segments + .last_mut() + .expect("Trait path should always contain at least one item; qed") + .ident; + *trait_name = versioned_trait_name(trait_name, version); + + trait_ +} + /// Generates the implementations of the apis for the runtime. fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { let mut impls_prepared = Vec::new(); @@ -405,9 +395,12 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { // We put `runtime` before each trait to get the trait that is intended for the runtime and // we put the `RuntimeBlock` as first argument for the trait generics. for impl_ in impls.iter() { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; + let mut impl_ = impl_.clone(); let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); let trait_ = extend_with_runtime_decl_path(trait_); + let trait_ = extend_with_api_version(trait_, trait_api_ver); impl_.trait_.as_mut().unwrap().1 = trait_; impl_.attrs = filter_cfg_attrs(&impl_.attrs); @@ -424,121 +417,70 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { /// with code that calls into the runtime. struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { runtime_block: &'a TypePath, - runtime_mod_path: &'a Path, - runtime_type: &'a Type, - trait_generic_arguments: &'a [GenericArgument], - impl_trait: &'a Ident, } -impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { - fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = - if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; - - fold::fold_type_path(self, new_ty_path) - } +impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { + /// Process the given item implementation. + fn process(mut self, input: ItemImpl) -> ItemImpl { + let mut input = self.fold_item_impl(input); - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { - let block = { - let runtime_mod_path = self.runtime_mod_path; - let runtime = self.runtime_type; - let native_call_generator_ident = - generate_native_call_generator_fn_name(&input.sig.ident); - let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); - let trait_generic_arguments = self.trait_generic_arguments; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate the access to the native parameters - let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![quote!(p)] - } else { - input - .sig - .inputs - .iter() - .enumerate() - .map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }) - .collect::>() - }; - - let (param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => ( - res.into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - None, - ), - Err(e) => (Vec::new(), Some(e.to_compile_error())), - }; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - // Rewrite the input parameters. - input.sig.inputs = parse_quote! { + // Delete all functions, because all of them are default implemented by + // `decl_runtime_apis!`. 
We only need to implement the `__runtime_api_internal_call_api_at` + // function. + input.items.clear(); + input.items.push(parse_quote! { + fn __runtime_api_internal_call_api_at( &self, at: &#crate_::BlockId<__SR_API_BLOCK__>, context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - }; - - input.sig.ident = - generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); - let ret_type = return_type_extract_type(&input.sig.output); - - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> - ); - - // Generate the new method implementation that calls into the runtime. - parse_quote!( - { - // Get the error to the user (if we have one). - #error - - self.call_api_at( - | - call_runtime_at, - changes, - storage_transaction_cache, - recorder - | { - #runtime_mod_path #call_api_at_call( - call_runtime_at, - at, - params_encoded, - changes, - storage_transaction_cache, - params.map(|p| { - #runtime_mod_path #native_call_generator_ident :: - <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( - #( #param_tuple_access ),* - ) - }), - context, - recorder, - ) - } - ) + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + if *std::cell::RefCell::borrow(&self.commit_on_success) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); } - ) - }; - let mut input = fold::fold_impl_item_method(self, input); - // We need to set the block, after we modified the rest of the ast, otherwise we would - // modify our generated block as well. - input.block = block; + let res = (|| { + let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at(self.call, at)?; + + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { + at, + function: (*fn_name)(version), + native_call: None, + arguments: params, + overlayed_changes: &self.changes, + storage_transaction_cache: &self.storage_transaction_cache, + context, + recorder: &self.recorder, + }; + + #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at::<#crate_::NeverNativeValue, _>( + self.call, + params, + ) + })(); + + self.commit_or_rollback(std::result::Result::is_ok(&res)); + + res.map(#crate_::NativeOrEncoded::into_encoded) + } + }); + input } +} + +impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { + fn fold_type_path(&mut self, input: TypePath) -> TypePath { + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; + + fold::fold_type_path(self, new_ty_path) + } fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { // All this `UnwindSafe` magic below here is required for this rust bug: @@ -594,45 +536,55 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result for impl_ in impls { let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; - let impl_trait = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
- .clone(); let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; - let runtime_type = &impl_.self_ty; let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let trait_generic_arguments = match impl_trait.arguments { - PathArguments::Parenthesized(_) | PathArguments::None => vec![], - PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), - }; - - let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { - runtime_block, - runtime_mod_path: &runtime_mod_path, - runtime_type, - trait_generic_arguments: &trait_generic_arguments, - impl_trait: &impl_trait.ident, - }; + let processed_impl = + ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); - result.push(visitor.fold_item_impl(impl_.clone())); + result.push(processed_impl); } Ok(quote!( #( #result )* )) } +fn populate_runtime_api_versions( + result: &mut Vec, + sections: &mut Vec, + attrs: Vec, + id: Path, + version: TokenStream, + crate_access: &TokenStream, +) { + result.push(quote!( + #( #attrs )* + (#id, #version) + )); + + sections.push(quote!( + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[cfg(not(feature = "std"))] + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); + }; + )); +} + /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented /// runtime apis. fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { - let mut result = Vec::with_capacity(impls.len()); - let mut sections = Vec::with_capacity(impls.len()); + let mut result = Vec::::with_capacity(impls.len()); + let mut sections = Vec::::with_capacity(impls.len()); let mut processed_traits = HashSet::new(); let c = generate_crate_access(HIDDEN_INCLUDES_ID); for impl_ in impls { + let api_ver = extract_api_version(&impl_.attrs, impl_.span())?.map(|a| a as u32); + let mut path = extend_with_runtime_decl_path( extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?.clone(), ); @@ -655,23 +607,11 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } let id: Path = parse_quote!( #path ID ); - let version: Path = parse_quote!( #path VERSION ); + let version = quote!( #path VERSION ); let attrs = filter_cfg_attrs(&impl_.attrs); - result.push(quote!( - #( #attrs )* - (#id, #version) - )); - - sections.push(quote!( - #( #attrs )* - const _: () = { - // All sections with the same name are going to be merged by concatenation. - #[cfg(not(feature = "std"))] - #[link_section = "runtime_apis"] - static SECTION_CONTENTS: [u8; 12] = #c::serialize_runtime_api_info(#id, #version); - }; - )); + let api_ver = api_ver.map(|a| quote!( #a )).unwrap_or_else(|| version); + populate_runtime_api_versions(&mut result, &mut sections, attrs, id, api_ver, &c) } Ok(quote!( @@ -726,6 +666,33 @@ fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { attrs.iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() } +// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. +// Returns: +// - Err if the version is malformed +// - Some(u64) if the version is set +// - None if the version is not set (this is valid). 
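// For illustration, the intended contract on a few attribute shapes (a sketch, not
// code that lives in this file):
//
//     #[api_version(3)] on the impl block              => Ok(Some(3))
//     no attribute at all                              => Ok(None)
//     #[api_version(2)] and #[api_version(3)] together => Err("Found multiple ...")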
+fn extract_api_version(attrs: &Vec<Attribute>, span: Span) -> Result<Option<u64>> {
+	// First fetch all `API_VERSION_ATTRIBUTE` values (should be only one)
+	let api_ver = attrs
+		.iter()
+		.filter(|a| a.path.is_ident(API_VERSION_ATTRIBUTE))
+		.collect::<Vec<_>>();
+
+	if api_ver.len() > 1 {
+		return Err(Error::new(
+			span,
+			format!(
+				"Found multiple #[{}] attributes for an API implementation. \
+				Each runtime API can have only one version.",
+				API_VERSION_ATTRIBUTE
+			),
+		))
+	}
+
+	// Parse the runtime version if there exists one.
+	api_ver.first().map(|v| parse_runtime_api_version(v)).transpose()
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs
index 20a2f76f2c83d..31636b8e2d545 100644
--- a/primitives/api/proc-macro/src/lib.rs
+++ b/primitives/api/proc-macro/src/lib.rs
@@ -21,6 +21,7 @@
 use proc_macro::TokenStream;
 
+mod common;
 mod decl_runtime_apis;
 mod impl_runtime_apis;
 mod mock_impl_runtime_apis;
diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
index 6098f8d6bd741..4de0f6b9e571b 100644
--- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
+++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
@@ -18,8 +18,7 @@
 use crate::utils::{
 	extract_block_type_from_trait_path, extract_impl_trait,
 	extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes,
-	generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters,
-	RequireQualifiedTraitPath,
+	return_type_extract_type, AllowSelfRefInParameters, RequireQualifiedTraitPath,
 };
 
 use proc_macro2::{Span, TokenStream};
@@ -31,7 +30,7 @@
 	parse::{Error, Parse, ParseStream, Result},
 	parse_macro_input, parse_quote,
 	spanned::Spanned,
-	Attribute, Ident, ItemImpl, Pat, Type, TypePath,
+	Attribute, ItemImpl, Pat, Type, TypePath,
 };
 
 /// Unique identifier used to make the hidden includes unique for this macro.
@@ -40,7 +39,7 @@
 /// The `advanced` attribute.
 ///
 /// If this attribute is given to a function, the function gets access to the `BlockId` as first
-/// parameter and needs to return a `Result` with the appropiate error type.
+/// parameter and needs to return a `Result` with the appropriate error type.
 const ADVANCED_ATTRIBUTE: &str = "advanced";
 
 /// The structure used for parsing the runtime api implementations.
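To make the new mock surface concrete, a minimal sketch of a mock after this rewrite, mirroring the `Balance` doc example in `primitives/api/src/lib.rs` (the `Balance` trait and `MockApi` type are illustrative, assuming the trait was declared with `decl_runtime_apis!`):

    struct MockApi {
        balance: u64,
    }

    sp_api::mock_impl_runtime_apis! {
        impl Balance<Block> for MockApi {
            // A plain method is written without a `BlockId` and returns the bare value.
            fn set_balance(_val: u64) {}

            // An `#[advanced]` method receives the `BlockId` and returns
            // `Result<ReturnValue, ApiError>` directly, without `NativeOrEncoded`.
            #[advanced]
            fn get_balance(&self, at: &BlockId<Block>) -> Result<u64, sp_api::ApiError> {
                println!("Being called at: {}", at);
                Ok(self.balance)
            }
        }
    }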
@@ -126,34 +125,63 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result for #self_ty { - fn Core_version_runtime_api_impl( + fn __runtime_api_internal_call_api_at( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: std::vec::Vec, + _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + unimplemented!("`__runtime_api_internal_call_api_at` not implemented for runtime api mocks") } - fn Core_execute_block_runtime_api_impl( + fn version( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn version_with_context( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn execute_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") + } + + fn execute_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<#block_type>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") } - fn Core_initialize_block_runtime_api_impl( + fn initialize_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") + } + + fn initialize_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<&<#block_type as #crate_::BlockT>::Header>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } )) @@ -216,14 +244,53 @@ fn get_at_param_name( } } -/// Auxialiry structure to fold a runtime api trait implementation into the expected format. +/// Auxiliary structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. struct FoldRuntimeApiImpl<'a> { /// The block type that is being used. block_type: &'a TypePath, - /// The identifier of the trait being implemented. - impl_trait: &'a Ident, +} + +impl<'a> FoldRuntimeApiImpl<'a> { + /// Process the given [`syn::ItemImpl`]. + fn process(mut self, impl_item: syn::ItemImpl) -> syn::ItemImpl { + let mut impl_item = self.fold_item_impl(impl_item); + + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // We also need to overwrite all the `_with_context` methods. To do this, + // we clone all methods and add them again with the new name plus one more argument. 
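// For example (sketch): a user-written mock method
//
//     fn get_balance(&self, at: &BlockId<Block>) -> Result<u64, sp_api::ApiError> { /* .. */ }
//
// gets a generated twin
//
//     fn get_balance_with_context(
//         &self,
//         at: &BlockId<Block>,
//         _: ExecutionContext,
//     ) -> Result<u64, sp_api::ApiError> { /* .. */ }
//
// that is, the same body with the `ExecutionContext` parameter inserted at position 2.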
+ impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| { + if let syn::ImplItem::Method(mut m) = i { + m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident); + m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext )); + + Some(m.into()) + } else { + None + } + })); + + let block_type = self.block_type; + + impl_item.items.push(parse_quote! { + fn __runtime_api_internal_call_api_at( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: std::vec::Vec, + _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + unimplemented!( + "`__runtime_api_internal_call_api_at` not implemented for runtime api mocks. \ + Calling deprecated methods is not supported by mocked runtime api." + ) + } + }); + + impl_item + } } impl<'a> Fold for FoldRuntimeApiImpl<'a> { @@ -277,14 +344,9 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { input.sig.inputs = parse_quote! { &self, #at_param_name: #block_id_type, - _: #crate_::ExecutionContext, - ___params___sp___api___: Option<( #( #param_types ),* )>, - _: Vec, + #( #param_names: #param_types ),* }; - input.sig.ident = - generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); - // When using advanced, the user needs to declare the correct return type on its own, // otherwise do it for the user. if !is_advanced { @@ -292,7 +354,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Generate the correct return type. input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> + -> std::result::Result<#ret_type, #crate_::ApiError> ); } @@ -304,7 +366,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { quote! { let __fn_implementation__ = move || #orig_block; - Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + Ok(__fn_implementation__()) } }; @@ -314,9 +376,6 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Get the error to the user (if we have one). 
#( #errors )* - let (#( #param_names ),*) = ___params___sp___api___ - .expect("Mocked runtime apis don't support calling deprecated api versions"); - #construct_return_value } ) @@ -351,11 +410,6 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Result Some(block_type.clone()), }; - let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident }; - - result.push(visitor.fold_item_impl(impl_.clone())); + result.push(FoldRuntimeApiImpl { block_type }.process(impl_.clone())); } Ok(GeneratedRuntimeApiImpls { diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 97b456b62dfa6..2ccd050cfb151 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -18,16 +18,18 @@ use proc_macro2::{Span, TokenStream}; use syn::{ - parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, - ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, + parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident, + ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::quote; +use quote::{format_ident, quote}; use std::env; use proc_macro_crate::{crate_name, FoundCrate}; +use crate::common::API_VERSION_ATTRIBUTE; + fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site()) } @@ -68,11 +70,6 @@ pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident { Ident::new(&format!("runtime_decl_for_{}", trait_), Span::call_site()) } -/// Generates a name for a method that needs to be implemented in the runtime for the client side. -pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> Ident { - Ident::new(&format!("{}_{}_runtime_api_impl", trait_, method), Span::call_site()) -} - /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { match rt { @@ -166,16 +163,6 @@ pub fn extract_parameter_names_types_and_borrows( Ok(result) } -/// Generates the name for the native call generator function. -pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_native_call_generator", fn_name), Span::call_site()) -} - -/// Generates the name for the call api at function. -pub fn generate_call_api_at_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_call_api_at", fn_name), Span::call_site()) -} - /// Prefix the given function with the trait name. pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> String { format!("{}_{}", trait_, function.to_string()) @@ -267,3 +254,23 @@ pub fn extract_impl_trait(impl_: &ItemImpl, require: RequireQualifiedTraitPath) } }) } + +/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. +pub fn parse_runtime_api_version(version: &Attribute) -> Result { + let version = version.parse_args::().map_err(|_| { + Error::new( + version.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + ) + })?; + + version.base10_parse() +} + +// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. 
ParachainHostV2 +pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { + format_ident!("{}V{}", trait_ident, version) +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 2635c81948ff3..a5230cfc5f116 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -78,12 +78,12 @@ pub use hash_db::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; +#[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::{NativeOrEncoded, NeverNativeValue}; #[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] @@ -187,6 +187,56 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the /// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` /// to check if the runtime at the given block id implements the requested runtime api trait. +/// +/// # Declaring multiple api versions +/// +/// Optionally multiple versions of the same api can be declared. This is useful for +/// development purposes. For example you want to have a testing version of the api which is +/// available only on a testnet. You can define one stable and one development version. This +/// can be done like this: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// The example above defines two api versions - 2 and 3. Version 2 contains `get_balance` and +/// `set_balance`. Version 3 additionally contains `transfer_balance`, which is not available +/// in version 2. Version 2 in this case is considered the default/base version of the api. +/// More than two versions can be defined this way. For example: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// /// Clears the balance +/// #[api_version(4)] +/// fn clear_balance(); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// Note that the latest version (4 in our example above) always contains all methods from all +/// the versions before. pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. @@ -276,6 +326,22 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// /// # fn main() {} /// ``` +/// +/// # Implementing specific api version +/// +/// If `decl_runtime_apis!` declares multiple versions for an api `impl_runtime_apis!` +/// should specify which version it implements by adding `api_version` attribute to the +/// `impl` block. If omitted - the base/default version is implemented. Here is an example: +/// ```ignore +/// sp_api::impl_runtime_apis! 
{
///     #[api_version(3)]
///     impl self::Balance<Block> for Runtime {
///         // implementation
///     }
/// }
/// ```
/// In this case `Balance` api version 3 is being implemented for `Runtime`. The `impl` block
/// must contain all methods declared in version 3 and below.
pub use sp_api_proc_macro::impl_runtime_apis;

/// Mocks given trait implementations as runtime apis.
@@ -341,15 +407,13 @@ pub use sp_api_proc_macro::impl_runtime_apis;
/// using the `advanced` attribute, the macro expects that the first parameter of the function
/// is this `at` parameter. Besides that the macro also doesn't do the automatic return value
/// rewrite, which means that full return value must be specified. The full return value is
-/// constructed like [`Result`]`<`[`NativeOrEncoded`](sp_api::NativeOrEncoded)`<ReturnValue>,
-/// Error>` while `ReturnValue` being the return value that is specified in the trait
-/// declaration.
+/// constructed like [`Result`]`<ReturnValue, Error>` while `ReturnValue` being the return
+/// value that is specified in the trait declaration.
///
/// ## Example
/// ```rust
/// # use sp_runtime::{traits::Block as BlockT, generic::BlockId};
/// # use sp_test_primitives::Block;
-/// # use sp_core::NativeOrEncoded;
/// # use codec;
/// #
/// # sp_api::decl_runtime_apis! {
@@ -368,13 +432,13 @@ pub use sp_api_proc_macro::impl_runtime_apis;
/// sp_api::mock_impl_runtime_apis! {
///     impl Balance<Block> for MockApi {
///         #[advanced]
-///         fn get_balance(&self, at: &BlockId<Block>) -> Result<NativeOrEncoded<u64>, sp_api::ApiError> {
+///         fn get_balance(&self, at: &BlockId<Block>) -> Result<u64, sp_api::ApiError> {
///             println!("Being called at: {}", at);
///
///             Ok(self.balance.into())
///         }
///         #[advanced]
-///         fn set_balance(at: &BlockId<Block>, val: u64) -> Result<NativeOrEncoded<()>, sp_api::ApiError> {
+///         fn set_balance(at: &BlockId<Block>, val: u64) -> Result<(), sp_api::ApiError> {
///             if let BlockId::Number(1) = at {
///                 println!("Being called to set balance to: {}", val);
///             }
diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs
index 1db416a1d3db6..42628830cc7fa 100644
--- a/primitives/api/test/tests/decl_and_impl.rs
+++ b/primitives/api/test/tests/decl_and_impl.rs
@@ -18,7 +18,6 @@
 use sp_api::{
 	decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt,
 	RuntimeApiInfo,
 };
-use sp_core::NativeOrEncoded;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, GetNodeBlockType},
@@ -47,6 +46,15 @@ decl_runtime_apis! {
 		#[changed_in(2)]
 		fn same_name() -> String;
 	}
+
+	#[api_version(2)]
+	pub trait ApiWithMultipleVersions {
+		fn stable_one(data: u64);
+		#[api_version(3)]
+		fn new_one();
+		#[api_version(4)]
+		fn glory_one();
+	}
 }
 
 impl_runtime_apis! {
@@ -72,6 +80,13 @@ impl_runtime_apis! {
 		fn same_name() {}
 	}
 
+	#[api_version(3)]
+	impl self::ApiWithMultipleVersions<Block> for Runtime {
+		fn stable_one(_: u64) {}
+
+		fn new_one() {}
+	}
+
 	impl sp_api::Core<Block> for Runtime {
 		fn version() -> sp_version::RuntimeVersion {
 			unimplemented!()
@@ -104,22 +119,12 @@ mock_impl_runtime_apis!
{ } #[advanced] - fn same_name(_: &BlockId) -> - Result< - NativeOrEncoded<()>, - ApiError - > - { + fn same_name(_: &BlockId) -> Result<(), ApiError> { Ok(().into()) } #[advanced] - fn wild_card(at: &BlockId, _: u32) -> - Result< - NativeOrEncoded<()>, - ApiError - > - { + fn wild_card(at: &BlockId, _: u32) -> Result<(), ApiError> { if let BlockId::Number(1337) = at { // yeah Ok(().into()) @@ -176,6 +181,9 @@ fn check_runtime_api_info() { &runtime_decl_for_ApiWithCustomVersion::ID, ); assert_eq!(>::VERSION, 2); + + // The stable version of the API + assert_eq!(>::VERSION, 2); } fn check_runtime_api_versions_contains() { @@ -186,6 +194,9 @@ fn check_runtime_api_versions_contains() { fn check_runtime_api_versions() { check_runtime_api_versions_contains::>(); check_runtime_api_versions_contains::>(); + assert!(RUNTIME_API_VERSIONS + .iter() + .any(|v| v == &(>::ID, 3))); check_runtime_api_versions_contains::>(); } @@ -198,7 +209,7 @@ fn mock_runtime_api_has_api() { } #[test] -#[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")] +#[should_panic(expected = "Calling deprecated methods is not supported by mocked runtime api.")] fn mock_runtime_api_panics_on_calling_old_version() { let mock = MockApi { block: None }; diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index ba42b342377c7..968bda6885d0d 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -25,7 +25,7 @@ use sp_state_machine::{ }; use substrate_test_runtime_client::{ prelude::*, - runtime::{Block, DecodeFails, Header, TestAPI, Transfer}, + runtime::{Block, Header, TestAPI, Transfer}, DefaultTestClientBuilderExt, TestClientBuilder, }; @@ -51,28 +51,6 @@ fn calling_wasm_runtime_function() { calling_function_with_strat(ExecutionStrategy::AlwaysWasm); } -#[test] -#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] -fn calling_native_runtime_function_with_non_decodable_parameter() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_parameter(&block_id, DecodeFails::default()).unwrap(); -} - -#[test] -#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] -fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_return_value(&block_id).unwrap(); -} - #[test] fn calling_native_runtime_signature_changed_function() { let client = TestClientBuilder::new() diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index f3d6aa59a0336..13af1ded7dc6b 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -30,4 +30,5 @@ fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); + t.pass("tests/ui/positive_cases/*.rs"); } diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index b1478e2f53344..2c47c2f480add 100644 --- 
a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found struct `std::string::String` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... | -32 | | } -33 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/impl_incorrect_method_signature.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... | -32 | | } -33 | | } - | |_^ expected `u64`, found struct `std::string::String` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 | diff --git a/primitives/api/test/tests/ui/impl_missing_version.rs b/primitives/api/test/tests/ui/impl_missing_version.rs new file mode 100644 index 0000000000000..63e0599622ac9 --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.rs @@ -0,0 +1,40 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! 
{ + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test3() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/impl_missing_version.stderr b/primitives/api/test/tests/ui/impl_missing_version.stderr new file mode 100644 index 0000000000000..c0abeffe0cccf --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.stderr @@ -0,0 +1,14 @@ +error[E0433]: failed to resolve: could not find `ApiV4` in `runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +21 | impl self::Api for Runtime { + | ^^^ could not find `ApiV4` in `runtime_decl_for_Api` + +error[E0405]: cannot find trait `ApiV4` in module `self::runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +11 | pub trait Api { + | ------------- similarly named trait `ApiV2` defined here +... +21 | impl self::Api for Runtime { + | ^^^ help: a trait with a similar name exists: `ApiV2` diff --git a/primitives/api/test/tests/ui/invalid_api_version.rs b/primitives/api/test/tests/ui/invalid_api_version_1.rs similarity index 100% rename from primitives/api/test/tests/ui/invalid_api_version.rs rename to primitives/api/test/tests/ui/invalid_api_version_1.rs diff --git a/primitives/api/test/tests/ui/invalid_api_version.stderr b/primitives/api/test/tests/ui/invalid_api_version_1.stderr similarity index 65% rename from primitives/api/test/tests/ui/invalid_api_version.stderr rename to primitives/api/test/tests/ui/invalid_api_version_1.stderr index 7770bc70e72d6..53ffce959bb66 100644 --- a/primitives/api/test/tests/ui/invalid_api_version.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_1.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version.rs:2:4 + --> tests/ui/invalid_api_version_1.rs:2:2 | 2 | #[api_version] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_2.stderr b/primitives/api/test/tests/ui/invalid_api_version_2.stderr index 7ca6a7eebe49c..0c5274d4680ff 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_2.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_2.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version_2.rs:2:4 + --> tests/ui/invalid_api_version_2.rs:2:2 | 2 | #[api_version("1")] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_3.stderr b/primitives/api/test/tests/ui/invalid_api_version_3.stderr index cef4763a6de96..4a34a7aa9b47a 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_3.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_3.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version_3.rs:2:4 + --> tests/ui/invalid_api_version_3.rs:2:2 | 2 | #[api_version()] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.rs b/primitives/api/test/tests/ui/invalid_api_version_4.rs new file mode 100644 index 0000000000000..37b5b6ffa25d1 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.rs @@ -0,0 +1,8 @@ +sp_api::decl_runtime_apis! 
{ + pub trait Api { + #[api_version("1")] + fn test(data: u64); + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.stderr b/primitives/api/test/tests/ui/invalid_api_version_4.stderr new file mode 100644 index 0000000000000..57541a97f6c91 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.stderr @@ -0,0 +1,5 @@ +error: Unexpected `api_version` attribute. The supported format is `api_version(1)` + --> tests/ui/invalid_api_version_4.rs:3:3 + | +3 | #[api_version("1")] + | ^ diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs new file mode 100644 index 0000000000000..b4f43cd401bba --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs @@ -0,0 +1,9 @@ +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + #[api_version(1)] + fn test(data: u64); + } +} + +fn main() {} \ No newline at end of file diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr new file mode 100644 index 0000000000000..ec4b594023a05 --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr @@ -0,0 +1,11 @@ +error: Method version `1` is older than (or equal to) trait version `2`.Methods can't define versions older than the trait version. + --> tests/ui/method_ver_lower_than_trait_ver.rs:4:3 + | +4 | #[api_version(1)] + | ^ + +error: Trait version is set here. + --> tests/ui/method_ver_lower_than_trait_ver.rs:2:2 + | +2 | #[api_version(2)] + | ^ diff --git a/primitives/api/test/tests/ui/missing_versioned_method.rs b/primitives/api/test/tests/ui/missing_versioned_method.rs new file mode 100644 index 0000000000000..d973a94c2101d --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -0,0 +1,39 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(3)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method.stderr b/primitives/api/test/tests/ui/missing_versioned_method.stderr new file mode 100644 index 0000000000000..e3ace7979c27e --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method.rs:21:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... 
+21 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs new file mode 100644 index 0000000000000..72358b99164d5 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -0,0 +1,42 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test4() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr new file mode 100644 index 0000000000000..7354fbd537fd7 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method_multiple_vers.rs:23:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... +23 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index fd654ffdc63d6..aeef40f4c77d6 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result, ApiError> { + fn test(&self, _: BlockId) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index befe67c1d0b4a..3b3c4e94c3121 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,10 +1,10 @@ error: `BlockId` needs to be taken by reference and not by value! - --> $DIR/mock_advanced_block_id_by_value.rs:12:1 + --> tests/ui/mock_advanced_block_id_by_value.rs:12:1 | 12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | #[advanced] -15 | | fn test(&self, _: BlockId) -> Result, ApiError> { +15 | | fn test(&self, _: BlockId) -> Result<(), ApiError> { ... | 18 | | } 19 | | } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index a15ef133fa6c4..76bf5f1aa7459 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! 
{ impl Api for MockApi { #[advanced] - fn test(&self) -> Result, ApiError> { + fn test(&self) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index 87d3660316b1e..b9ce7324b5caa 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> $DIR/mock_advanced_missing_blockid.rs:15:3 + --> tests/ui/mock_advanced_missing_blockid.rs:15:3 | -15 | fn test(&self) -> Result, ApiError> { +15 | fn test(&self) -> Result<(), ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index c67de70b9c140..430f63eee1660 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -10,62 +10,80 @@ error: Only `&self` is supported! 16 | fn test2(&mut self, data: u64) {} | ^ -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test` has 2 parameters but the declaration in trait `Api::test` has 3 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 3 parameters, found 2 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api::test2` has 3 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 3 parameters, found 2 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! 
{ +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 4 parameters, found 3 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 4 parameters, found 3 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/primitives/api/test/tests/ui/positive_cases/default_impls.rs new file mode 100644 index 0000000000000..3434db1089f05 --- /dev/null +++ b/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -0,0 +1,41 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(2)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index dbc0f6def3aa5..479e1cf05a9d1 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `&u64` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... 
| -34 | | } -35 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... | -34 | | } -35 | | } - | |_^ expected `u64`, found `&u64` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 | From d558a21dda1a6f9df919c442527b84975bcfee47 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 13 Aug 2022 13:05:39 +0100 Subject: [PATCH 129/135] Fix Gov V2 Benchmarks (#12022) * better benchmark log * Update lib.rs * fix gov2 benchmarks * saturating math + fmt * add comment --- frame/referenda/src/benchmarking.rs | 12 ++++++++++-- frame/referenda/src/lib.rs | 6 ++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 9abd3768f780c..dfca7cdf465dd 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -101,8 +101,16 @@ fn info(index: ReferendumIndex) -> &'static TrackInfoOf { } fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { - let support = info::(index).min_support.threshold(period_portion); - let approval = info::(index).min_approval.threshold(period_portion); + // We add an extra 1 percent to handle any perbill rounding errors which may cause + // a proposal to not actually pass. 
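// As a concrete illustration (sketch): a support curve evaluated part-way through the
// decision period can come back as, say, 499_999_999 parts per billion rather than an
// exact Perbill::from_percent(50). A tally built from that raw threshold can then
// narrowly fail the passing check, which is what the one percent of headroom absorbs.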
+ let support = info::(index) + .min_support + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); + let approval = info::(index) + .min_approval + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::from_requirements(support, approval, class); diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 15c5562d64c84..e9e14e1d4a96a 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -699,7 +699,8 @@ impl, I: 'static> Pallet { when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); - let when = (when + alarm_interval - One::one()) / alarm_interval * alarm_interval; + let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) / + (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, @@ -752,7 +753,8 @@ impl, I: 'static> Pallet { None }; let deciding_status = DecidingStatus { since: now, confirming }; - let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track); + let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track) + .max(now.saturating_add(One::one())); status.deciding = Some(deciding_status); let branch = if is_passing { BeginDecidingBranch::Passing } else { BeginDecidingBranch::Failing }; From 055453ebdf00db9e59ef5af0c59c1b4e39267813 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 13 Aug 2022 16:07:10 +0100 Subject: [PATCH 130/135] Add Benchmarking Instance to Pallets (#12026) * benchmark instance * add benchmark instance to conviction voting * instance on bags list * fmt --- frame/bags-list/src/benchmarks.rs | 6 +- frame/conviction-voting/src/benchmarking.rs | 117 +++--- frame/referenda/src/benchmarking.rs | 408 ++++++++++---------- 3 files changed, 266 insertions(+), 265 deletions(-) diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index dba0c9ee1e623..bb45da076f751 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -25,7 +25,7 @@ use frame_support::{assert_ok, traits::Get}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::One; -frame_benchmarking::benchmarks! { +frame_benchmarking::benchmarks_instance_pallet! { rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // @@ -97,7 +97,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); // define our origin and destination thresholds. let origin_bag_thresh = T::BagThresholds::get()[0]; @@ -146,7 +146,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. 
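Before the diff moves on to the instance changes, note that the second `frame/referenda/src/lib.rs` hunk above also clamps the computed alarm so it always lands strictly after the current block. A plain-integer sketch of that clamp, with `u64` standing in for `T::BlockNumber` (an assumption for illustration):

```rust
// Whatever `decision_time` computes, the scheduler must never be asked
// to fire at a block that has already passed.
fn clamped_alarm(decision_time: u64, now: u64) -> u64 {
    decision_time.max(now.saturating_add(1))
}

fn main() {
    assert_eq!(clamped_alarm(5, 100), 101);   // stale target pushed to now + 1
    assert_eq!(clamped_alarm(250, 100), 250); // future targets pass through
}
```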
// NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); let bag_thresh = T::BagThresholds::get()[0]; diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index 53ac7a07302f9..d472770832497 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks, whitelist_account}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ dispatch::RawOrigin, traits::{fungible, Currency, Get}, @@ -34,8 +34,9 @@ const SEED: u32 = 0; /// Fill all classes as much as possible up to `MaxVotes` and return the Class with the most votes /// ongoing. -fn fill_voting() -> (ClassOf, BTreeMap, Vec>>) { - let mut r = BTreeMap::, Vec>>::new(); +fn fill_voting, I: 'static>( +) -> (ClassOf, BTreeMap, Vec>>) { + let mut r = BTreeMap::, Vec>>::new(); for class in T::Polls::classes().into_iter() { for _ in 0..T::MaxVotes::get() { match T::Polls::create_ongoing(class.clone()) { @@ -48,34 +49,34 @@ fn fill_voting() -> (ClassOf, BTreeMap, Vec> (c, r) } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn account_vote(b: BalanceOf) -> AccountVote> { +fn account_vote, I: 'static>(b: BalanceOf) -> AccountVote> { let v = Vote { aye: true, conviction: Conviction::Locked1x }; AccountVote::Standard { vote: v, balance: b } } -benchmarks! { +benchmarks_instance_pallet! { where_clause { where T::MaxVotes: core::fmt::Debug } vote_new { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let account_vote = account_vote::(100u32.into()); + let account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len() - 1; // We need to create existing votes for i in polls.iter().skip(1) { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -85,52 +86,52 @@ benchmarks! { }: vote(RawOrigin::Signed(caller.clone()), index, account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == (r + 1) as usize ); } vote_existing { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r, "Votes were not recorded."); - let new_account_vote = account_vote::(200u32.into()); + let new_account_vote = account_vote::(200u32.into()); let index = polls[0]; }: vote(RawOrigin::Signed(caller.clone()), index, new_account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); } remove_vote { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -140,25 +141,25 @@ benchmarks! { }: _(RawOrigin::Signed(caller.clone()), Some(class.clone()), index) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize ); } remove_other_vote { - let caller = funded_account::("caller", 0); - let voter = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); + let voter = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -169,7 +170,7 @@ benchmarks! 
{ }: _(RawOrigin::Signed(caller.clone()), voter.clone(), class.clone(), index) verify { assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize ); } @@ -177,44 +178,44 @@ benchmarks! { delegate { let r in 0 .. T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); // We need to create existing delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter, Conviction::Locked1x, delegated_balance) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); } undelegate { let r in 0 .. T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); - ConvictionVoting::::delegate( + ConvictionVoting::::delegate( RawOrigin::Signed(caller.clone()).into(), class.clone(), voter.clone(), @@ -224,31 +225,31 @@ benchmarks! { // We need to create delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == r as usize ); - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); }: _(RawOrigin::Signed(caller.clone()), class.clone()) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); } unlock { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); - let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); + let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); + let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); // Fill everything up to the max by filling all classes with votes and voting on them all. - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); assert!(all_polls.len() > 0); for (class, polls) in all_polls.iter() { assert!(polls.len() > 0); for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; } } @@ -257,12 +258,12 @@ benchmarks! { // Vote big on the class with the most ongoing votes of them to bump the lock and make it // hard to recompute when removed. - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; let now_usable = >::reducible_balance(&caller, false); assert_eq!(orig_usable - now_usable, 100u32.into()); // Remove the vote - ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; + ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; // We can now unlock on `class` from 200 to 100... 
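The diff context above elides most angle-bracketed parameters, so for readability, here is the instance-generic shape these benchmark helpers take, spelled out in full — a sketch assuming the pallet's own `Config` trait and `BalanceOf` alias are in scope:

```rust
use frame_benchmarking::account;
use frame_support::traits::Currency;
use sp_runtime::traits::Bounded;

const SEED: u32 = 0;

// `T: Config<I>` plus a free `I: 'static` lets one benchmark body serve
// every instance of the pallet; storage and balance aliases then take
// the instance as a second parameter (`BalanceOf<T, I>`, `VotingFor<T, I>`).
fn funded_account<T: Config<I>, I: 'static>(name: &'static str, index: u32) -> T::AccountId {
    let caller: T::AccountId = account(name, index, SEED);
    // Max out the balance so deposits and locks never fail mid-benchmark.
    T::Currency::make_free_balance_be(&caller, BalanceOf::<T, I>::max_value());
    caller
}
```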
}: _(RawOrigin::Signed(caller.clone()), class, caller.clone()) diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index dfca7cdf465dd..b9c64a61f510d 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use crate::Pallet as Referenda; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks, whitelist_account}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ assert_ok, traits::{Currency, EnsureOrigin}, @@ -31,157 +31,157 @@ use sp_runtime::traits::{Bounded, Hash}; const SEED: u32 = 0; #[allow(dead_code)] -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn create_referendum() -> (T::AccountId, ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn create_referendum, I: 'static>() -> (T::AccountId, ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::submit( + assert_ok!(Referenda::::submit( RawOrigin::Signed(caller.clone()).into(), Box::new(RawOrigin::Root.into()), T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) )); - let index = ReferendumCount::::get() - 1; + let index = ReferendumCount::::get() - 1; (caller, index) } -fn place_deposit(index: ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn place_deposit, I: 'static>(index: ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); + assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); } -fn nudge(index: ReferendumIndex) { - assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); +fn nudge, I: 'static>(index: ReferendumIndex) { + assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); } -fn fill_queue( +fn fill_queue, I: 'static>( index: ReferendumIndex, spaces: u32, pass_after: u32, ) -> Vec { // First, create enough other referendums to fill the track. let mut others = vec![]; - for _ in 0..info::(index).max_deciding { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + for _ in 0..info::(index).max_deciding { + let (_caller, index) = create_referendum::(); + place_deposit::(index); others.push(index); } // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing_after::(index, Perbill::from_percent(pass_after)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); } // Skip to when they can start being decided. 
- skip_prepare_period::(index); + skip_prepare_period::(index); // Manually nudge the other referenda first to ensure that they begin. - others.iter().for_each(|&i| nudge::(i)); + others.iter().for_each(|&i| nudge::(i)); others } -fn info(index: ReferendumIndex) -> &'static TrackInfoOf { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn info, I: 'static>(index: ReferendumIndex) -> &'static TrackInfoOf { + let status = Referenda::::ensure_ongoing(index).unwrap(); T::Tracks::info(status.track).expect("Id value returned from T::Tracks") } -fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { +fn make_passing_after, I: 'static>(index: ReferendumIndex, period_portion: Perbill) { // We add an extra 1 percent to handle any perbill rounding errors which may cause // a proposal to not actually pass. - let support = info::(index) + let support = info::(index) .min_support .threshold(period_portion) .saturating_add(Perbill::from_percent(1)); - let approval = info::(index) + let approval = info::(index) .min_approval .threshold(period_portion) .saturating_add(Perbill::from_percent(1)); - Referenda::::access_poll(index, |status| { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::from_requirements(support, approval, class); } }); } -fn make_passing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_passing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::unanimity(class); } }); } -fn make_failing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_failing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::rejection(class); } }); } -fn skip_prepare_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let prepare_period_over = status.submitted + info::(index).prepare_period; +fn skip_prepare_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let prepare_period_over = status.submitted + info::(index).prepare_period; frame_system::Pallet::::set_block_number(prepare_period_over); } -fn skip_decision_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; +fn skip_decision_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; frame_system::Pallet::::set_block_number(decision_period_over); } -fn skip_confirm_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_confirm_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let confirm_period_over = status.deciding.unwrap().confirming.unwrap(); frame_system::Pallet::::set_block_number(confirm_period_over); } -fn skip_timeout_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_timeout_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let timeout_period_over = status.submitted + T::UndecidingTimeout::get(); 
frame_system::Pallet::::set_block_number(timeout_period_over); } -fn alarm_time(index: ReferendumIndex) -> T::BlockNumber { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn alarm_time, I: 'static>(index: ReferendumIndex) -> T::BlockNumber { + let status = Referenda::::ensure_ongoing(index).unwrap(); status.alarm.unwrap().0 } -fn is_confirming(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_confirming, I: 'static>(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: Some(_), .. }), .. } ) } -fn is_not_confirming(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: None, .. }), .. } ) } -benchmarks! { +benchmarks_instance_pallet! { submit { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); }: _( RawOrigin::Signed(caller), @@ -189,105 +189,105 @@ benchmarks! { T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) ) verify { - let index = ReferendumCount::::get().checked_sub(1).unwrap(); - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); + let index = ReferendumCount::::get().checked_sub(1).unwrap(); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); } place_decision_deposit_preparing { - let (caller, index) = create_referendum::(); + let (caller, index) = create_referendum::(); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); + assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } place_decision_deposit_queued { - let (caller, index) = create_referendum::(); - fill_queue::(index, 1, 90); + let (caller, index) = create_referendum::(); + fill_queue::(index, 1, 90); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } place_decision_deposit_not_queued { - let (caller, index) = create_referendum::(); - fill_queue::(index, 0, 90); + let (caller, index) = create_referendum::(); + fill_queue::(index, 0, 90); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } place_decision_deposit_passing { - let (caller, index) = create_referendum::(); - skip_prepare_period::(index); - make_passing::(index); + let (caller, index) = 
create_referendum::(); + skip_prepare_period::(index); + make_passing::(index); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } place_decision_deposit_failing { - let (caller, index) = create_referendum::(); - skip_prepare_period::(index); + let (caller, index) = create_referendum::(); + skip_prepare_period::(index); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } refund_decision_deposit { - let (caller, index) = create_referendum::(); - place_deposit::(index); - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + let (caller, index) = create_referendum::(); + place_deposit::(index); + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); }: _(RawOrigin::Signed(caller), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } cancel { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: _(T::CancelOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); } kill { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: _(T::KillOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); } one_fewer_deciding_queue_empty { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - assert_eq!(DecidingCount::::get(&track), 1); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + assert_eq!(DecidingCount::::get(&track), 1); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), 0); + assert_eq!(DecidingCount::::get(&track), 0); } one_fewer_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. 
- let queued = fill_queue::(index, 0, 90); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 90); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding .map_or(true, |d| d.confirming.is_none()) @@ -295,18 +295,18 @@ benchmarks! { } one_fewer_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. - let queued = fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 0); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding .map_or(true, |d| d.confirming.is_some()) @@ -315,43 +315,43 @@ benchmarks! { nudge_referendum_requeued_insertion { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 90); // Now nudge ours, with the track now full and the queue full of referenda with votes, // ours will not be in the queue. - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); // Now alter the voting, so that ours goes into pole-position and shifts others down. 
- make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } nudge_referendum_requeued_slide { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 90); // Now nudge ours, with the track now full, ours will be queued, but with no votes, it // will have the worst position. - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); // Now alter the voting, so that ours leap-frogs all into the best position. - make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } @@ -362,159 +362,159 @@ benchmarks! { // insertion at the beginning. // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. }: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } nudge_referendum_not_queued { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. 
}: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } nudge_referendum_no_deposit { - let (_caller, index) = create_referendum::(); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. }); } nudge_referendum_preparing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. }); } nudge_referendum_timed_out { - let (_caller, index) = create_referendum::(); - skip_timeout_period::(index); + let (_caller, index) = create_referendum::(); + skip_timeout_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::TimedOut(..)); } nudge_referendum_begin_deciding_failing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } nudge_referendum_begin_deciding_passing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_begin_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - make_passing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_end_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - assert!(is_confirming::(index)); - make_failing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + assert!(is_confirming::(index)); + make_failing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(!is_confirming::(index)); + assert!(!is_confirming::(index)); } 
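The `skip_*` helpers used throughout these benchmarks all reduce to the same move: compute a target block number and write it straight into the System pallet. A generic sketch, which is fine inside benchmark externalities where no real block production runs:

```rust
// Jump the chain's notion of "now" to `target` in one step.
fn fast_forward_to<T: frame_system::Config>(target: T::BlockNumber) {
    frame_system::Pallet::<T>::set_block_number(target);
}
```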
nudge_referendum_continue_not_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - let old_alarm = alarm_time::(index); - make_passing_after::(index, Perbill::from_percent(50)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + let old_alarm = alarm_time::(index); + make_passing_after::(index, Perbill::from_percent(50)); }: nudge_referendum(RawOrigin::Root, index) verify { - assert_ne!(old_alarm, alarm_time::(index)); - assert!(!is_confirming::(index)); + assert_ne!(old_alarm, alarm_time::(index)); + assert!(!is_confirming::(index)); } nudge_referendum_continue_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(is_confirming::(index)); - let old_alarm = alarm_time::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(is_confirming::(index)); + let old_alarm = alarm_time::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_approved { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - skip_confirm_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + skip_confirm_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Approved(..)); } nudge_referendum_rejected { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_failing::(index); - nudge::(index); - skip_decision_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_failing::(index); + nudge::(index); + skip_decision_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Rejected(..)); } From 34a0621761c4a333cb2074ff720f7acbfb92dbb8 Mon Sep 17 00:00:00 2001 From: Sudip Ghimire <39845901+sudipghimire533@users.noreply.github.com> Date: Sun, 14 Aug 2022 23:50:06 +0545 Subject: [PATCH 131/135] make MAX_VOTERS and MAX_CANDIDATES in elections-phragmen configurable. 
Fix: #11092 (#11908) * make MAX_VOTERS and MAX_CANDIDATES in elections-phragmen configurable * Configure election-phragmen in node bin configuring max candidates & voters * Add document comment for added Config parameter * Incorporate suggestion * fix benchmarks * Update frame/elections-phragmen/src/lib.rs * Update frame/elections-phragmen/src/lib.rs * fix wrong values * fix typo * docs * more detailed docs * fmt * ".git/.scripts/bench-bot.sh" pallet dev pallet_elections_phragmen Co-authored-by: Szegoo Co-authored-by: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 4 + frame/elections-phragmen/src/benchmarking.rs | 26 ++--- frame/elections-phragmen/src/lib.rs | 40 +++++--- frame/elections-phragmen/src/weights.rs | 102 +++++++++---------- 4 files changed, 94 insertions(+), 78 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6bfbe3413058a..ff793a49b5ce6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -966,6 +966,8 @@ parameter_types! { pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; + pub const MaxVoters: u32 = 10 * 1000; + pub const MaxCandidates: u32 = 1000; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -989,6 +991,8 @@ impl pallet_elections_phragmen::Config for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; + type MaxVoters = MaxVoters; + type MaxCandidates = MaxCandidates; type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight; } diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 9a9d448449eca..2d1b698940a9d 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -36,7 +36,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); // Fund each account with at-least his stake but still a sane amount as to not mess up // the vote calculation. - let amount = default_stake::(MAX_VOTERS) * BalanceOf::::from(BALANCE_FACTOR); + let amount = default_stake::(T::MaxVoters::get()) * BalanceOf::::from(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. @@ -230,7 +230,7 @@ benchmarks! { submit_candidacy { // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -261,7 +261,7 @@ benchmarks! { // this will check members, runners-up and candidate for removal. Members and runners-up are // limited by the runtime bound, nonetheless we fill them by `m`. // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -362,14 +362,14 @@ benchmarks! { clean_defunct_voters { // total number of voters. - let v in (MAX_VOTERS / 2) .. MAX_VOTERS; + let v in (T::MaxVoters::get() / 2) .. 
T::MaxVoters::get(); // those that are defunct and need removal. - let d in 1 .. (MAX_VOTERS / 2); + let d in 1 .. (T::MaxVoters::get() / 2); // remove any previous stuff. clean::(); - let all_candidates = submit_candidates::(MAX_CANDIDATES, "candidates")?; + let all_candidates = submit_candidates::(T::MaxCandidates::get(), "candidates")?; distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; // all candidates leave. @@ -389,9 +389,9 @@ benchmarks! { // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. // Yet, change the number of voters, candidates and edge per voter to see the impact. Note // that we give all candidates a self vote to make sure they are all considered. - let c in 1 .. MAX_CANDIDATES; - let v in 1 .. MAX_VOTERS; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let c in 1 .. T::MaxCandidates::get(); + let v in 1 .. T::MaxVoters::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() as u32 * MAXIMUM_VOTE as u32; clean::(); // so we have a situation with v and e. we want e to basically always be in the range of `e @@ -425,9 +425,9 @@ benchmarks! { #[extra] election_phragmen_c_e { - let c in 1 .. MAX_CANDIDATES; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; - let fixed_v = MAX_VOTERS; + let c in 1 .. T::MaxCandidates::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() * MAXIMUM_VOTE as u32; + let fixed_v = T::MaxVoters::get(); clean::(); let votes_per_voter = e / fixed_v; @@ -459,7 +459,7 @@ benchmarks! { #[extra] election_phragmen_v { let v in 4 .. 16; - let fixed_c = MAX_CANDIDATES / 10; + let fixed_c = T::MaxCandidates::get() / 10; let fixed_e = 64; clean::(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 28fed28f18e5c..ffd1c6ab4de9c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -125,17 +125,6 @@ pub mod migrations; /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; -// Some safe temp values to make the wasm execution sane while we still use this pallet. -#[cfg(test)] -pub(crate) const MAX_CANDIDATES: u32 = 100; -#[cfg(not(test))] -pub(crate) const MAX_CANDIDATES: u32 = 1000; - -#[cfg(test)] -pub(crate) const MAX_VOTERS: u32 = 1000; -#[cfg(not(test))] -pub(crate) const MAX_VOTERS: u32 = 10 * 1000; - type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency< @@ -259,6 +248,21 @@ pub mod pallet { #[pallet::constant] type TermDuration: Get; + /// The maximum number of candidates in a phragmen election. + /// + /// Warning: The election happens onchain, and this value will determine + /// the size of the election. When this limit is reached no more + /// candidates are accepted in the election. + #[pallet::constant] + type MaxCandidates: Get; + + /// The maximum number of voters to allow in a phragmen election. + /// + /// Warning: This impacts the size of the election which is run onchain. + /// When the limit is reached the new voters are ignored. + #[pallet::constant] + type MaxVoters: Get; + /// Weight information for extrinsics in this pallet. 
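For context, this patch's `bin/node/runtime` hunk wires the two new bounds up as follows; the concrete values are that runtime's choice rather than requirements, and the impl is abridged to the relevant lines:

```rust
parameter_types! {
    pub const MaxVoters: u32 = 10 * 1000;
    pub const MaxCandidates: u32 = 1000;
}

impl pallet_elections_phragmen::Config for Runtime {
    // ...existing associated types elided...
    type MaxVoters = MaxVoters;
    type MaxCandidates = MaxCandidates;
    type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight<Runtime>;
}
```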
type WeightInfo: WeightInfo; } @@ -397,7 +401,10 @@ pub mod pallet { let actual_count = >::decode_len().unwrap_or(0) as u32; ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); - ensure!(actual_count <= MAX_CANDIDATES, Error::::TooManyCandidates); + ensure!( + actual_count <= ::MaxCandidates::get(), + Error::::TooManyCandidates + ); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -913,10 +920,11 @@ impl Pallet { let mut num_edges: u32 = 0; + let max_voters = ::MaxVoters::get() as usize; // used for prime election. let mut voters_and_stakes = Vec::new(); match Voting::::iter().try_for_each(|(voter, Voter { stake, votes, .. })| { - if voters_and_stakes.len() < MAX_VOTERS as usize { + if voters_and_stakes.len() < max_voters { voters_and_stakes.push((voter, stake, votes)); Ok(()) } else { @@ -930,7 +938,7 @@ impl Pallet { "Failed to run election. Number of voters exceeded", ); Self::deposit_event(Event::ElectionError); - return T::DbWeight::get().reads(3 + MAX_VOTERS as u64) + return T::DbWeight::get().reads(3 + max_voters as u64) }, } @@ -1266,6 +1274,8 @@ mod tests { parameter_types! { pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; + pub const PhragmenMaxVoters: u32 = 1000; + pub const PhragmenMaxCandidates: u32 = 100; } impl Config for Test { @@ -1284,6 +1294,8 @@ mod tests { type LoserCandidate = (); type KickedMember = (); type WeightInfo = (); + type MaxVoters = PhragmenMaxVoters; + type MaxCandidates = PhragmenMaxCandidates; } pub type Block = sp_runtime::generic::Block; diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 07ee7aa2012ad..0e067699e5fac 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-08-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-08-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -70,9 +70,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (26_143_000 as Weight) - // Standard Error: 23_000 - .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + (27_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -83,9 +83,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (40_431_000 as Weight) - // Standard Error: 6_000 - .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + (40_240_000 as Weight) + // Standard Error: 5_000 + .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -96,16 +96,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. 
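The regenerated weight functions below all follow one template, worth reading once — a sketch with invented numbers: a flat base time from the benchmark's intercept, a per-component slope from the linear regression, and database charges scaled by the runtime's configured `DbWeight`:

```rust
use frame_support::weights::Weight; // a u64 alias at this point in history

fn weight_template<T: frame_system::Config>(v: u32) -> Weight {
    (40_000_000 as Weight) // base extrinsic time
        .saturating_add((220_000 as Weight).saturating_mul(v as Weight)) // slope per `v`
        .saturating_add(T::DbWeight::get().reads(5 as Weight)) // storage reads
        .saturating_add(T::DbWeight::get().writes(2 as Weight)) // storage writes
}
```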
fn vote_less(v: u32, ) -> Weight { - (40_188_000 as Weight) - // Standard Error: 6_000 - .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + (40_394_000 as Weight) + // Standard Error: 5_000 + .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (38_031_000 as Weight) + (37_651_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -114,18 +114,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (43_715_000 as Weight) + (42_217_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (47_882_000 as Weight) - // Standard Error: 1_000 - .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + (46_459_000 as Weight) + // Standard Error: 0 + .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -135,13 +135,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_600_000 as Weight) + (45_189_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_959_000 as Weight) + (34_516_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -156,7 +156,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (52_684_000 as Weight) + (51_838_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } @@ -170,8 +170,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[1, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 76_000 + .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -184,17 +184,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:19 w:19) + // Storage: System Account (r:1 w:1) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. 
/// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 778_000 - .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 773_000 + .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 51_000 - .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(279 as Weight)) + .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(280 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) @@ -210,9 +210,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (26_143_000 as Weight) - // Standard Error: 23_000 - .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + (27_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -223,9 +223,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (40_431_000 as Weight) - // Standard Error: 6_000 - .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + (40_240_000 as Weight) + // Standard Error: 5_000 + .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -236,16 +236,16 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (40_188_000 as Weight) - // Standard Error: 6_000 - .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + (40_394_000 as Weight) + // Standard Error: 5_000 + .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (38_031_000 as Weight) + (37_651_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -254,18 +254,18 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (43_715_000 as Weight) + (42_217_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (47_882_000 as Weight) - // Standard Error: 1_000 - .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + (46_459_000 as Weight) + // Standard Error: 0 + .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -275,13 +275,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_600_000 as Weight) + (45_189_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_959_000 as Weight) + (34_516_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -296,7 +296,7 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (52_684_000 as Weight) + (51_838_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } @@ -310,8 +310,8 @@ impl WeightInfo for () { /// The range of component `d` is `[1, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 76_000 + .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -324,17 +324,17 @@ impl WeightInfo for () { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:19 w:19) + // Storage: System Account (r:1 w:1) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. 
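Every function in these generated `WeightInfo` impls has the same affine shape: a constant base weight measured by the benchmark, one slope per ranged component (with the quoted standard error), plus flat database read/write charges. A minimal standalone sketch of how one of the formulas above evaluates; the read/write constants are illustrative stand-ins, not the real `RocksDbWeight` values:

// Stand-in DB costs; the real numbers come from `RocksDbWeight`/`T::DbWeight`.
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

// Mirrors the shape of `vote_less` above: base + slope * v, plus 5 reads and 2 writes.
fn vote_less_weight(v: u64) -> u64 {
    40_394_000u64
        .saturating_add(217_000u64.saturating_mul(v))
        .saturating_add(READ.saturating_mul(5))
        .saturating_add(WRITE.saturating_mul(2))
}

fn main() {
    // Evaluate at the top of the component range `v` in `[2, 16]`.
    println!("{}", vote_less_weight(16));
}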
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 778_000 - .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 773_000 + .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 51_000 - .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(279 as Weight)) + .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(280 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) From bfc2d085f392ccfb369251709bccb4bfa0edebe7 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sun, 14 Aug 2022 19:36:19 +0100 Subject: [PATCH 132/135] Further Improve Gov V2 Stuff (#12028) * refactor and improve successful origin * use appropriate origin for creating referendum * fmt * add setup tally feature for benchmarks * feedback updates * fmt * Update frame/referenda/src/benchmarking.rs Co-authored-by: Gavin Wood * feedback on `setup` trait * fix Co-authored-by: Gavin Wood --- frame/conviction-voting/src/types.rs | 3 + frame/ranked-collective/src/lib.rs | 137 +++++++++++++++++++++------ frame/ranked-collective/src/tests.rs | 17 ++++ frame/referenda/src/benchmarking.rs | 118 ++++++++++++----------- frame/referenda/src/mock.rs | 3 + frame/support/src/traits/voting.rs | 13 +++ 6 files changed, 210 insertions(+), 81 deletions(-) diff --git a/frame/conviction-voting/src/types.rs b/frame/conviction-voting/src/types.rs index 8f4f3697e9766..d6051dff62569 100644 --- a/frame/conviction-voting/src/types.rs +++ b/frame/conviction-voting/src/types.rs @@ -93,6 +93,9 @@ impl< let ayes = approval.mul_ceil(support); Self { ayes, nays: support - ayes, support, dummy: PhantomData } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(_: Class, _: Perbill) {} } impl< diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 7ea43a9017445..79910c259a7cd 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -85,16 +85,16 @@ pub type Votes = u32; Decode, MaxEncodedLen, )] -#[scale_info(skip_type_params(M))] +#[scale_info(skip_type_params(T, I, M))] #[codec(mel_bound())] -pub struct Tally { +pub struct Tally { bare_ayes: MemberIndex, ayes: Votes, nays: Votes, - dummy: PhantomData, + dummy: PhantomData<(T, I, M)>, } -impl Tally { +impl, I: 'static, M: GetMaxVoters> Tally { pub fn from_parts(bare_ayes: MemberIndex, ayes: Votes, nays: Votes) -> Self { Tally { bare_ayes, ayes, nays, dummy: PhantomData } } @@ -107,10 +107,10 @@ impl Tally { // All functions of VoteTally now include the class as a param. 
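The `from_requirements` helper that appears in the hunks below (and again in the referenda mock) derives a `nays` count from a target approval ratio, so that `ayes / (ayes + nays)` lands on the requested `Perbill`. A quick standalone check of that arithmetic, with the `Perbill` replaced by raw parts-per-billion (the function name is a stand-in):

// ayes / (ayes + nays) == approval  =>  nays = ayes * 1e9 / approval_ppb - ayes
fn nays_for(ayes: u32, approval_ppb: u64) -> u32 {
    ((ayes as u64) * 1_000_000_000u64 / approval_ppb) as u32 - ayes
}

fn main() {
    // 50% approval with 100 ayes needs 100 nays.
    assert_eq!(nays_for(100, 500_000_000), 100);
    // Two-thirds approval with 100 ayes needs 50 nays (integer division rounds for us).
    assert_eq!(nays_for(100, 666_666_666), 50);
}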
-pub type TallyOf = Tally>; +pub type TallyOf = Tally>; pub type PollIndexOf = <>::Polls as Polling>>::Index; -impl VoteTally for Tally { +impl, I: 'static, M: GetMaxVoters> VoteTally for Tally { fn new(_: Rank) -> Self { Self { bare_ayes: 0, ayes: 0, nays: 0, dummy: PhantomData } } @@ -143,6 +143,20 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { bare_ayes: ayes, ayes, nays, dummy: PhantomData } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(class: Rank, granularity: Perbill) { + if M::get_max_voters(class) == 0 { + let max_voters = granularity.saturating_reciprocal_mul(1u32); + for i in 0..max_voters { + let who: T::AccountId = + frame_benchmarking::account("ranked_collective_benchmarking", i, 0); + crate::Pallet::::do_add_member_to_rank(who, class) + .expect("could not add members for benchmarks"); + } + assert_eq!(M::get_max_voters(class), max_voters); + } + } } /// Record needed for every member. @@ -241,6 +255,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. The account ID of the @@ -264,6 +291,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. 
The pair of including both @@ -287,6 +327,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } #[frame_support::pallet] @@ -415,17 +468,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_member())] pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; - ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); - let index = MemberCount::::get(0); - let count = index.checked_add(1).ok_or(Overflow)?; - - Members::::insert(&who, MemberRecord { rank: 0 }); - IdToIndex::::insert(0, &who, index); - IndexToId::::insert(0, index, &who); - MemberCount::::insert(0, count); - Self::deposit_event(Event::MemberAdded { who }); - - Ok(()) + Self::do_add_member(who) } /// Increment the rank of an existing member by one. @@ -437,17 +480,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::promote_member(0))] pub fn promote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; - let record = Self::ensure_member(&who)?; - let rank = record.rank.checked_add(1).ok_or(Overflow)?; - ensure!(max_rank >= rank, Error::::NoPermission); - let index = MemberCount::::get(rank); - MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); - IdToIndex::::insert(rank, &who, index); - IndexToId::::insert(rank, index, &who); - Members::::insert(&who, MemberRecord { rank }); - Self::deposit_event(Event::RankChanged { who, rank }); - - Ok(()) + Self::do_promote_member(who, Some(max_rank)) } /// Decrement the rank of an existing member by one. If the member is already at rank zero, @@ -626,5 +659,53 @@ pub mod pallet { MemberCount::::mutate(rank, |r| r.saturating_dec()); Ok(()) } + + /// Adds a member into the ranked collective at level 0. + /// + /// No origin checks are executed. + pub fn do_add_member(who: T::AccountId) -> DispatchResult { + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + let index = MemberCount::::get(0); + let count = index.checked_add(1).ok_or(Overflow)?; + + Members::::insert(&who, MemberRecord { rank: 0 }); + IdToIndex::::insert(0, &who, index); + IndexToId::::insert(0, index, &who); + MemberCount::::insert(0, count); + Self::deposit_event(Event::MemberAdded { who }); + Ok(()) + } + + /// Promotes a member in the ranked collective to the next rank. + /// + /// A `maybe_max_rank` may be provided to check that the member does not get promoted beyond + /// a certain rank. If `None` is provided, then the rank will be incremented without checks.
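The `maybe_max_rank` rule in the comment above is small enough to model standalone; `promote` here is a hypothetical stand-in for `do_promote_member` with the pallet types reduced to plain integers:

type Rank = u16;

// Promotion bumps the rank by one; an optional cap rejects the bump when the
// new rank would exceed the caller's clearance, mirroring the `NoPermission` check.
fn promote(rank: Rank, maybe_max_rank: Option<Rank>) -> Result<Rank, &'static str> {
    let new_rank = rank.checked_add(1).ok_or("Overflow")?;
    if let Some(max_rank) = maybe_max_rank {
        if new_rank > max_rank {
            return Err("NoPermission");
        }
    }
    Ok(new_rank)
}

fn main() {
    assert_eq!(promote(3, Some(4)), Ok(4)); // within the cap
    assert_eq!(promote(4, Some(4)), Err("NoPermission")); // would exceed it
    assert_eq!(promote(4, None), Ok(5)); // no cap: increment freely
}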
+ pub fn do_promote_member( + who: T::AccountId, + maybe_max_rank: Option, + ) -> DispatchResult { + let record = Self::ensure_member(&who)?; + let rank = record.rank.checked_add(1).ok_or(Overflow)?; + if let Some(max_rank) = maybe_max_rank { + ensure!(max_rank >= rank, Error::::NoPermission); + } + let index = MemberCount::::get(rank); + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); + IdToIndex::::insert(rank, &who, index); + IndexToId::::insert(rank, index, &who); + Members::::insert(&who, MemberRecord { rank }); + Self::deposit_event(Event::RankChanged { who, rank }); + Ok(()) + } + + /// Add a member to the rank collective, and continue to promote them until a certain rank + /// is reached. + pub fn do_add_member_to_rank(who: T::AccountId, rank: Rank) -> DispatchResult { + Self::do_add_member(who.clone())?; + for _ in 0..rank { + Self::do_promote_member(who.clone(), None)?; + } + Ok(()) + } } } diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 4344a1be730fb..530388d83f3c4 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -455,3 +455,20 @@ fn ensure_ranked_works() { assert_eq!(Rank4::try_origin(Origin::signed(3)).unwrap_err().as_signed().unwrap(), 3); }); } + +#[test] +fn do_add_member_to_rank_works() { + new_test_ext().execute_with(|| { + let max_rank = 9u16; + assert_ok!(Club::do_add_member_to_rank(69, max_rank / 2)); + assert_ok!(Club::do_add_member_to_rank(1337, max_rank)); + for i in 0..=max_rank { + if i <= max_rank / 2 { + assert_eq!(member_count(i), 2); + } else { + assert_eq!(member_count(i), 1); + } + } + assert_eq!(member_count(max_rank + 1), 0); + }) +} diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index b9c64a61f510d..4a64a9aa03b77 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -23,6 +23,7 @@ use assert_matches::assert_matches; use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ assert_ok, + dispatch::UnfilteredDispatchable, traits::{Currency, EnsureOrigin}, }; use frame_system::RawOrigin; @@ -41,17 +42,20 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } -fn create_referendum, I: 'static>() -> (T::AccountId, ReferendumIndex) { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - assert_ok!(Referenda::::submit( - RawOrigin::Signed(caller.clone()).into(), - Box::new(RawOrigin::Root.into()), - T::Hashing::hash_of(&0), - DispatchTime::After(0u32.into()) - )); +fn create_referendum, I: 'static>() -> (T::Origin, ReferendumIndex) { + let origin: T::Origin = T::SubmitOrigin::successful_origin(); + if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + whitelist_account!(caller); + } + + let proposal_origin = Box::new(RawOrigin::Root.into()); + let proposal_hash = T::Hashing::hash_of(&0); + let enactment_moment = DispatchTime::After(0u32.into()); + let call = Call::::submit { proposal_origin, proposal_hash, enactment_moment }; + assert_ok!(call.dispatch_bypass_filter(origin.clone())); let index = ReferendumCount::::get() - 1; - (caller, index) + (origin, index) } fn place_deposit, I: 'static>(index: ReferendumIndex) { @@ -72,7 +76,7 @@ fn fill_queue, I: 'static>( // First, create enough other referendums to fill the track. 
let mut others = vec![]; for _ in 0..info::(index).max_deciding { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); others.push(index); } @@ -80,7 +84,7 @@ fn fill_queue, I: 'static>( // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); @@ -113,6 +117,7 @@ fn make_passing_after, I: 'static>(index: ReferendumIndex, period_p .saturating_add(Perbill::from_percent(1)); Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::from_requirements(support, approval, class); } }); @@ -121,6 +126,7 @@ fn make_passing_after, I: 'static>(index: ReferendumIndex, period_p fn make_passing, I: 'static>(index: ReferendumIndex) { Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::unanimity(class); } }); @@ -129,6 +135,7 @@ fn make_passing, I: 'static>(index: ReferendumIndex) { fn make_failing, I: 'static>(index: ReferendumIndex) { Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::rejection(class); } }); @@ -181,10 +188,13 @@ fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { benchmarks_instance_pallet! { submit { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - }: _( - RawOrigin::Signed(caller), + let origin: T::Origin = T::SubmitOrigin::successful_origin(); + if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + whitelist_account!(caller); + } + }: _( + origin, Box::new(RawOrigin::Root.into()), T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) @@ -194,60 +204,62 @@ benchmarks_instance_pallet! 
{ } place_decision_deposit_preparing { - let (caller, index) = create_referendum::(); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + let (origin, index) = create_referendum::(); + }: place_decision_deposit(origin, index) verify { assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } place_decision_deposit_queued { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); fill_queue::(index, 1, 90); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + assert!(TrackQueue::::get(&track).contains(&(index, 0u32.into()))); } place_decision_deposit_not_queued { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); fill_queue::(index, 0, 90); - }: place_decision_deposit(RawOrigin::Signed(caller), index) - verify { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + }: place_decision_deposit(origin, index) + verify { + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } place_decision_deposit_passing { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); skip_prepare_period::(index); make_passing::(index); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { assert!(is_confirming::(index)); } place_decision_deposit_failing { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); skip_prepare_period::(index); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { assert!(is_not_confirming::(index)); } refund_decision_deposit { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); place_deposit::(index); assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - }: _(RawOrigin::Signed(caller), index) + }: _(origin, index) verify { assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } cancel { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: _(T::CancelOrigin::successful_origin(), index) verify { @@ -255,7 +267,7 @@ benchmarks_instance_pallet! { } kill { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: _(T::KillOrigin::successful_origin(), index) verify { @@ -263,7 +275,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_queue_empty { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -276,7 +288,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); // No spaces free in the queue. 
let queued = fill_queue::(index, 0, 90); let track = Referenda::::ensure_ongoing(index).unwrap().track; @@ -295,7 +307,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); // No spaces free in the queue. let queued = fill_queue::(index, 0, 0); let track = Referenda::::ensure_ongoing(index).unwrap().track; @@ -306,16 +318,16 @@ benchmarks_instance_pallet! { verify { assert_eq!(DecidingCount::::get(&track), deciding_count); assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert!(queued.into_iter().skip(1).filter(|i| Referenda::::ensure_ongoing(*i) .unwrap() .deciding - .map_or(true, |d| d.confirming.is_some()) - )); + .map_or(false, |d| d.confirming.is_some()) + ).count() == 1); } nudge_referendum_requeued_insertion { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 0, 90); @@ -336,7 +348,7 @@ benchmarks_instance_pallet! { nudge_referendum_requeued_slide { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 1, 90); @@ -362,7 +374,7 @@ benchmarks_instance_pallet! { // insertion at the beginning. // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 1, 0); @@ -379,7 +391,7 @@ benchmarks_instance_pallet! { nudge_referendum_not_queued { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 0, 0); @@ -395,7 +407,7 @@ benchmarks_instance_pallet! { } nudge_referendum_no_deposit { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -404,7 +416,7 @@ benchmarks_instance_pallet! { } nudge_referendum_preparing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -413,7 +425,7 @@ benchmarks_instance_pallet! { } nudge_referendum_timed_out { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); skip_timeout_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -422,7 +434,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) @@ -431,7 +443,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing::(index); skip_prepare_period::(index); @@ -441,7 +453,7 @@ benchmarks_instance_pallet! 
{ } nudge_referendum_begin_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -453,7 +465,7 @@ benchmarks_instance_pallet! { } nudge_referendum_end_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_passing::(index); @@ -466,7 +478,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_not_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -480,7 +492,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing::(index); skip_prepare_period::(index); @@ -493,7 +505,7 @@ benchmarks_instance_pallet! { } nudge_referendum_approved { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_passing::(index); @@ -506,7 +518,7 @@ benchmarks_instance_pallet! { } nudge_referendum_rejected { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_failing::(index); diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 1a24911603990..dbe22cd562349 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -289,6 +289,9 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { ayes, nays } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(_: Class, _: Perbill) {} } pub fn set_balance_proposal(value: u64) -> Vec { diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index b5e7d27073895..49ae3163d0cd1 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -106,6 +106,19 @@ pub trait VoteTally { fn rejection(class: Class) -> Self; #[cfg(feature = "runtime-benchmarks")] fn from_requirements(support: Perbill, approval: Perbill, class: Class) -> Self; + #[cfg(feature = "runtime-benchmarks")] + /// A function that should be called before any use of the `runtime-benchmarks` gated functions + /// of the `VoteTally` trait. + /// + /// Should be used to set up any needed state in a Pallet which implements `VoteTally` so that + /// benchmarks that execute will complete successfully. `class` can be used to set up a + /// particular class of voters, and `granularity` is used to determine the weight of one vote + /// relative to total unanimity. + /// + /// For example, in the case where there are a number of unique voters, and each voter has equal + /// voting weight, a granularity of `Perbill::from_rational(1, 1000)` should create `1_000` + /// users. 
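The granularity convention documented above is just a reciprocal: create enough voters that one vote is worth roughly `granularity` of unanimity. The ranked-collective implementation gets there with `granularity.saturating_reciprocal_mul(1u32)`; a standalone model on raw parts-per-billion:

// Number of voters implied by a granularity, ignoring the saturating edge cases.
fn voters_for_granularity(granularity_ppb: u64) -> u64 {
    1_000_000_000 / granularity_ppb
}

fn main() {
    // Perbill::from_rational(1, 1000) == 1_000_000 parts per billion => 1_000 voters.
    assert_eq!(voters_for_granularity(1_000_000), 1_000);
}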
+ fn setup(class: Class, granularity: Perbill); } pub enum PollStatus { None, From 669ed362f922c679ab1416fc8b9e9f4f68a21483 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sun, 14 Aug 2022 20:06:02 +0100 Subject: [PATCH 133/135] Proposal: Flatten `AllPallets` and similar types (#11813) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * flratten AllPallets types * feature flag it * fix * fix * fmt * remove todo * Update frame/support/src/traits/metadata.rs Co-authored-by: Bastian Köcher * Update frame/support/src/migrations.rs Co-authored-by: Bastian Köcher * fix * mark as deprecated * add docs * fix ui test? * fmt Co-authored-by: parity-processbot <> Co-authored-by: Bastian Köcher --- frame/support/Cargo.toml | 4 ++ .../procedural/src/construct_runtime/mod.rs | 60 +++++++------------ .../src/pallet/expand/pallet_struct.rs | 6 +- frame/support/src/dispatch.rs | 4 +- frame/support/src/lib.rs | 10 ++++ frame/support/src/migrations.rs | 5 +- frame/support/src/traits.rs | 5 +- frame/support/src/traits/filter.rs | 11 ---- frame/support/src/traits/hooks.rs | 37 ++++++++++-- frame/support/src/traits/members.rs | 9 ++- frame/support/src/traits/metadata.rs | 41 ++++--------- frame/support/src/traits/misc.rs | 13 +++- frame/support/src/traits/storage.rs | 5 +- frame/support/test/tests/pallet.rs | 13 ++-- .../tests/pallet_ui/hooks_invalid_item.stderr | 6 +- 15 files changed, 121 insertions(+), 108 deletions(-) diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index a69133961e970..1b6488bbd4712 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -73,3 +73,7 @@ no-metadata-docs = ["frame-support-procedural/no-metadata-docs"] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. full-metadata-docs = ["scale-info/docs"] +# Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of +# pallets in a runtime grows. Does increase the compile time! +tuples-96 = [] +tuples-128 = [] diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 7b4156a94db58..042af31be6bf4 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -308,47 +308,26 @@ fn decl_all_pallets<'a>( names.push(&pallet_declaration.name); } - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - let all_pallets_with_system = names - .iter() - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system_reversed = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... 
))))` - let all_pallets_with_system_reversed = names - .iter() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let system_pallet = match names.iter().find(|n| **n == SYSTEM_PALLET_NAME) { Some(name) => name, None => return syn::Error::new( proc_macro2::Span::call_site(), "`System` pallet declaration is missing. \ - Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) .into_compile_error(), }; + let names_without_system = + names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).collect::>(); + let names_reversed = names.clone().into_iter().rev().collect::>(); + let names_without_system_reverse = + names_without_system.clone().into_iter().rev().collect::>(); + let names_reversed_with_system_first = std::iter::once(system_pallet) + .chain(names_without_system_reverse.clone().into_iter()) + .collect::>(); + quote!( #types @@ -364,25 +343,28 @@ fn decl_all_pallets<'a>( pub type AllPallets = AllPalletsWithSystem; /// All pallets included in the runtime as a nested tuple of types. - pub type AllPalletsWithSystem = ( #all_pallets_with_system ); + pub type AllPalletsWithSystem = ( #(#names),* ); /// All pallets included in the runtime as a nested tuple of types. /// Excludes the System pallet. - pub type AllPalletsWithoutSystem = ( #all_pallets_without_system ); + pub type AllPalletsWithoutSystem = ( #(#names_without_system),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. /// Excludes the System pallet. - pub type AllPalletsWithoutSystemReversed = ( #all_pallets_without_system_reversed ); + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithoutSystemReversed =( #(#names_without_system_reverse),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. - pub type AllPalletsWithSystemReversed = ( #all_pallets_with_system_reversed ); + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithSystemReversed = ( #(#names_reversed),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. /// With the system pallet first. - pub type AllPalletsReversedWithSystemFirst = ( - #system_pallet, - AllPalletsWithoutSystemReversed - ); + #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsReversedWithSystemFirst = ( #(#names_reversed_with_system_first),* ); ) } diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index a4a8acc10f799..f0fb6bacedffb 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -240,9 +240,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn accumulate( - acc: &mut #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> - ) { + fn infos() -> #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { use #frame_support::traits::PalletInfoAccess; let item = #frame_support::traits::PalletInfoData { index: Self::index(), @@ -250,7 +248,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + #frame_support::sp_std::vec![item] } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index fb0b658cd9303..a658f01fa6854 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2207,7 +2207,7 @@ macro_rules! decl_module { for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { fn count() -> usize { 1 } - fn accumulate(acc: &mut $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData>) { + fn infos() -> $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData> { use $crate::traits::PalletInfoAccess; let item = $crate::traits::PalletInfoData { index: Self::index(), @@ -2215,7 +2215,7 @@ macro_rules! decl_module { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + vec![item] } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 8e43df82a284c..7e4c944330fe3 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -16,6 +16,16 @@ // limitations under the License. //! Support code for the runtime. +//! +//! ## Note on Tuple Traits +//! +//! Many of the traits defined in [`traits`] have auto-implementations on tuples as well. Usually, +//! the tuple is a function of number of pallets in the runtime. By default, the traits are +//! implemented for tuples of up to 64 items. +// +// If you have more pallets in your runtime, or for any other reason need more, enable `tuples-96` +// or the `tuples-128` compilation flag. Note that these features *will increase* the compilation +// time of this crate. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 05833e0515c07..f594b98ede4ff 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -19,6 +19,7 @@ use crate::{ traits::{GetStorageVersion, PalletInfoAccess}, weights::{RuntimeDbWeight, Weight}, }; +use impl_trait_for_tuples::impl_for_tuples; /// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration.
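The same three-attribute pattern recurs on every tuple impl this patch touches: exactly one `impl_for_tuples` arity is active, selected by the feature set. Applied to a hypothetical `OnTick` trait (`OnTick` and `Ticker` are made up; `impl_trait_for_tuples` is the real crate already used above, and the `tuples-96`/`tuples-128` features would need to be declared in the sketch's own Cargo.toml to have any effect):

use impl_trait_for_tuples::impl_for_tuples;

pub trait OnTick {
    fn on_tick();
}

pub struct Ticker;
impl OnTick for Ticker {
    fn on_tick() {
        println!("tick");
    }
}

// With no features enabled, the first attribute applies and tuples of up to 64
// elements implement `OnTick`; each element's hook runs in order.
#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
impl OnTick for Tuple {
    fn on_tick() {
        for_tuples!( #( Tuple::on_tick(); )* );
    }
}

fn main() {
    <(Ticker, Ticker)>::on_tick(); // prints "tick" twice
}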
pub trait PalletVersionToStorageVersionHelper { @@ -42,7 +43,9 @@ impl PalletVersionToStorageVersionHelpe } } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { let mut weight: Weight = 0; diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 72d6d6682f14a..16504beb16907 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -50,7 +50,7 @@ mod error; pub use error::PalletError; mod filter; -pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter, IntegrityTest}; +pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter}; mod misc; pub use misc::{ @@ -81,7 +81,8 @@ mod hooks; #[cfg(feature = "std")] pub use hooks::GenesisBuild; pub use hooks::{ - Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, + Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnTimestampSet, }; #[cfg(feature = "try-runtime")] pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs index 95e5954184b4b..cdd82a3124e63 100644 --- a/frame/support/src/traits/filter.rs +++ b/frame/support/src/traits/filter.rs @@ -180,17 +180,6 @@ macro_rules! impl_filter_stack { } } -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - #[cfg(test)] pub mod test_impl_filter_stack { use super::*; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 385db4e4d1ad9..2b7234006e0ff 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -38,7 +38,9 @@ pub trait OnInitialize { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnInitialize for Tuple { fn on_initialize(n: BlockNumber) -> crate::weights::Weight { let mut weight = 0; @@ -50,7 +52,9 @@ impl OnInitialize for Tuple { /// The block finalization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is ending. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. 
/// @@ -79,7 +83,9 @@ pub trait OnIdle { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnIdle for Tuple { fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { let on_idle_functions: &[fn( @@ -105,7 +111,9 @@ impl OnIdle for Tuple { /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and /// will see the genesis state after all pallets have written their genesis state. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnGenesis { /// Something that should happen at genesis. fn on_genesis() {} @@ -187,7 +195,9 @@ pub trait OnRuntimeUpgrade { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { fn on_runtime_upgrade() -> crate::weights::Weight { let mut weight = 0; @@ -210,6 +220,19 @@ impl OnRuntimeUpgrade for Tuple { } } +/// Type that provide some integrity tests. +/// +/// This implemented for modules by `decl_module`. +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. + fn integrity_test() {} +} + /// The pallet hooks trait. Implementing this lets you express some logic to execute. pub trait Hooks { /// The block is being finalized. Implement to have something happen. @@ -321,7 +344,9 @@ pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeD } /// A trait which is called when the timestamp is set in the runtime. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnTimestampSet { /// Called when the timestamp is set. fn on_timestamp_set(moment: Moment); diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs index 8c69a2aaccb33..daf2d3aa6517d 100644 --- a/frame/support/src/traits/members.rs +++ b/frame/support/src/traits/members.rs @@ -17,6 +17,7 @@ //! Traits for dealing with the idea of membership. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. 
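The tuple implementation of `Contains` below short-circuits to `true` as soon as any element matches, i.e. the members compose as a logical OR. A self-contained model of that semantics with two hypothetical filters:

trait Contains<T> {
    fn contains(t: &T) -> bool;
}

struct EvenOnly;
impl Contains<u32> for EvenOnly {
    fn contains(t: &u32) -> bool { t % 2 == 0 }
}

struct SmallOnly;
impl Contains<u32> for SmallOnly {
    fn contains(t: &u32) -> bool { *t < 10 }
}

// What the generated tuple impl boils down to for a pair: an OR over the elements.
impl<T, A: Contains<T>, B: Contains<T>> Contains<T> for (A, B) {
    fn contains(t: &T) -> bool {
        A::contains(t) || B::contains(t)
    }
}

fn main() {
    assert!(<(EvenOnly, SmallOnly)>::contains(&3));   // small, though odd
    assert!(<(EvenOnly, SmallOnly)>::contains(&100)); // even, though large
    assert!(!<(EvenOnly, SmallOnly)>::contains(&11)); // neither
}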
@@ -25,7 +26,9 @@ pub trait Contains { fn contains(t: &T) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl Contains for Tuple { fn contains(t: &T) -> bool { for_tuples!( #( @@ -41,7 +44,9 @@ pub trait ContainsPair { fn contains(a: &A, b: &B) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(0, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl ContainsPair for Tuple { fn contains(a: &A, b: &B) -> bool { for_tuples!( #( diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index d3dc57e1ee52d..b0dd5bd5160b4 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -18,6 +18,7 @@ //! Traits for managing information attached to pallets and their constituents. use codec::{Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; @@ -70,40 +71,22 @@ pub trait PalletsInfoAccess { /// /// You probably don't want this function but `infos()` instead. fn count() -> usize { - 0 + // for backwards compatibility with XCM-3, Mark is deprecated. + Self::infos().len() } - /// Extend the given vector by all of the pallets' information that this type represents. - /// - /// You probably don't want this function but `infos()` instead. - fn accumulate(_accumulator: &mut Vec) {} - /// All of the pallets' information that this type represents. - fn infos() -> Vec { - let mut result = Vec::with_capacity(Self::count()); - Self::accumulate(&mut result); - result - } + fn infos() -> Vec; } -impl PalletsInfoAccess for () {} -impl PalletsInfoAccess for (T,) { - fn count() -> usize { - T::count() - } - fn accumulate(acc: &mut Vec) { - T::accumulate(acc) - } -} - -impl PalletsInfoAccess for (T1, T2) { - fn count() -> usize { - T1::count() + T2::count() - } - fn accumulate(acc: &mut Vec) { - // The AllPallets type tuplises the pallets in reverse order, so we unreverse them here. - T2::accumulate(acc); - T1::accumulate(acc); +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PalletsInfoAccess for Tuple { + fn infos() -> Vec { + let mut res = vec![]; + for_tuples!( #( res.extend(Tuple::infos()); )* ); + res } } diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index fa59fa92f920d..0f781d0bbd4cf 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -19,6 +19,7 @@ use crate::dispatch::Parameter; use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; +use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; #[doc(hidden)] @@ -467,14 +468,18 @@ impl SameOrOther { } /// Handler for when a new account has been created. 
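Stepping back to the `PalletsInfoAccess` change above: `infos` becomes the one required method, the tuple impl simply concatenates, and `count` is derived from the result. A simplified standalone model (`PalletsInfo` is a stand-in name for the real trait):

trait PalletsInfo {
    fn infos() -> Vec<&'static str>;
    fn count() -> usize {
        Self::infos().len()
    }
}

struct System;
struct Balances;
impl PalletsInfo for System {
    fn infos() -> Vec<&'static str> { vec!["System"] }
}
impl PalletsInfo for Balances {
    fn infos() -> Vec<&'static str> { vec!["Balances"] }
}

// The flattened tuple impl reduces to one `extend` per element, in declaration order.
impl<A: PalletsInfo, B: PalletsInfo> PalletsInfo for (A, B) {
    fn infos() -> Vec<&'static str> {
        let mut res = vec![];
        res.extend(A::infos());
        res.extend(B::infos());
        res
    }
}

fn main() {
    assert_eq!(<(System, Balances)>::infos(), vec!["System", "Balances"]);
    assert_eq!(<(System, Balances)>::count(), 2);
}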
-#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnNewAccount { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnKilledAccount { /// The account with the given id was reaped. fn on_killed_account(who: &AccountId); @@ -632,7 +637,9 @@ impl PrivilegeCmp for EqualPrivilegeOnly { /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OffchainWorker { /// This function is being called after every block import (when fully synced). /// diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index e484140cc2fd9..d40d82c28e87e 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -17,6 +17,7 @@ //! Traits for encoding data related to pallet's storage items. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::prelude::*; /// An instance of a pallet in the storage. @@ -71,7 +72,9 @@ pub trait StorageInfoTrait { fn storage_info() -> Vec; } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl StorageInfoTrait for Tuple { fn storage_info() -> Vec { let mut res = vec![]; diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 6b72327eb4989..c4bbc59c70373 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -1597,8 +1597,9 @@ fn test_storage_info() { #[test] fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} - fn _b(t: (System, (Example4, (Example2, (Example,))))) { + fn _b(t: (System, Example4, Example2, Example)) { _a(t) } } @@ -1607,7 +1608,7 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithSystem) {} - fn _b(t: (System, (Example, (Example2, (Example4,))))) { + fn _b(t: (System, Example, Example2, Example4)) { _a(t) } } @@ -1616,7 +1617,7 @@ fn assert_type_all_pallets_with_system_is_correct() { fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithoutSystem) {} - fn _b(t: (Example, (Example2, (Example4,)))) { + fn _b(t: (Example, Example2, Example4)) { _a(t) } } @@ -1624,8 +1625,9 @@ fn assert_type_all_pallets_without_system_is_correct() { #[test] fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example, (System,))))) { + fn _b(t: (Example4, Example2, Example, System)) { _a(t) } } @@ -1633,8 +1635,9 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { #[test] fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example,)))) { + fn _b(t: (Example4, Example2, Example)) { _a(t) } } diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index d1a89fbb850e9..ff52a094d6f8d 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,13 +1,13 @@ error[E0107]: missing generics for trait `Hooks` - --> $DIR/hooks_invalid_item.rs:12:18 + --> tests/pallet_ui/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} | ^^^^^ expected 1 generic argument | note: trait defined here, with 1 generic parameter: `BlockNumber` - --> $DIR/hooks.rs:214:11 + --> $WORKSPACE/frame/support/src/traits/hooks.rs | -214 | pub trait Hooks { + | pub trait Hooks { | ^^^^^ ----------- help: add missing generic argument | From cf36d2f7eaaee27516c601141a2ea0acd3db641c Mon Sep 17 00:00:00 2001 From: Jake Hemmerle Date: Mon, 15 Aug 2022 03:53:39 -0400 Subject: [PATCH 134/135] swap ed25519-dalek for ed25519-zebra (#11781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * swap ed25519-dalek for ed25519-zebra; no batch verificaiton fixed batch verificaiton tests removed additional zero verificaiton tests removed comments, fixed test bug, added #[derive(Clone)] Update primitives/core/src/ed25519.rs Co-authored-by: Squirrel * modified assertion to allow default ed25519-zebra zero key behavior * cargo clippy * Update primitives/core/Cargo.toml Co-authored-by: Bastian Köcher * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * updated Cargo.lock for sp-core * fix inaccurate comment Co-authored-by: Squirrel Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher Co-authored-by: Davide Galassi --- Cargo.lock | 16 ++++++++++- Cargo.toml | 2 +- primitives/core/Cargo.toml | 6 ++-- primitives/core/src/ed25519.rs | 51 ++++++++++++++-------------------- primitives/io/src/lib.rs | 27 +++++++++++------- 5 files changed, 57 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2db192dcbf1a1..b5506b1259436 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1788,6 +1788,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403ef3e961ab98f0ba902771d29f842058578bb1ce7e3c59dad5a6a93e784c69" +dependencies = [ + "curve25519-dalek 3.0.2", + "hex", + "rand_core 0.6.2", + "sha2 0.9.8", + 
"thiserror", + "zeroize", +] + [[package]] name = "either" version = "1.6.1" @@ -9624,7 +9638,7 @@ dependencies = [ "byteorder", "criterion", "dyn-clonable", - "ed25519-dalek", + "ed25519-zebra", "futures", "hash-db", "hash256-std-hasher", diff --git a/Cargo.toml b/Cargo.toml index 1f22343c002a8..e2907716ca9f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -258,7 +258,7 @@ crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } -ed25519-dalek = { opt-level = 3 } +ed25519-zebra = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index fced678293140..2cdfd04e942d7 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -47,7 +47,7 @@ thiserror = { version = "1.0.30", optional = true } bitflags = "1.3" # full crypto -ed25519-dalek = { version = "1.0.1", default-features = false, features = ["u64_backend", "alloc"], optional = true } +ed25519-zebra = { version = "3.0.0", default-features = false, optional = true} blake2-rfc = { version = "0.2.18", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", @@ -97,7 +97,7 @@ std = [ "sp-std/std", "serde", "blake2-rfc/std", - "ed25519-dalek/std", + "ed25519-zebra", "hex/std", "base58", "substrate-bip39", @@ -127,7 +127,7 @@ std = [ # or Intel SGX. # For the regular wasm runtime builds this should not be used. full_crypto = [ - "ed25519-dalek", + "ed25519-zebra", "blake2-rfc", "schnorrkel", "hex", diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 177af0651c0ef..0553cf4843df5 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -39,7 +39,9 @@ use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "std")] use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use ed25519_dalek::{Signer as _, Verifier as _}; +use core::convert::TryFrom; +#[cfg(feature = "full_crypto")] +use ed25519_zebra::{SigningKey, VerificationKey}; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; @@ -75,17 +77,10 @@ pub struct Public(pub [u8; 32]); /// A key pair. #[cfg(feature = "full_crypto")] -pub struct Pair(ed25519_dalek::Keypair); - -#[cfg(feature = "full_crypto")] -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(ed25519_dalek::Keypair { - public: self.0.public, - secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed"), - }) - } +#[derive(Copy, Clone)] +pub struct Pair { + public: VerificationKey, + secret: SigningKey, } impl AsRef<[u8; 32]> for Public { @@ -456,10 +451,10 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = ed25519_dalek::PublicKey::from(&secret); - Ok(Pair(ed25519_dalek::Keypair { secret, public })) + let secret = + SigningKey::try_from(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = VerificationKey::from(&secret); + Ok(Pair { secret, public }) } /// Derive a child key from a series of given junctions. 
@@ -468,7 +463,7 @@ impl TraitPair for Pair { path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.0.secret.to_bytes(); + let mut acc = self.secret.into(); for j in path { match j { DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), @@ -480,16 +475,12 @@ impl TraitPair for Pair { /// Get the public key. fn public(&self) -> Public { - let mut r = [0u8; 32]; - let pk = self.0.public.as_bytes(); - r.copy_from_slice(pk); - Public(r) + Public(self.public.into()) } /// Sign a message. fn sign(&self, message: &[u8]) -> Signature { - let r = self.0.sign(message).to_bytes(); - Signature::from_raw(r) + Signature::from_raw(self.secret.sign(message).into()) } /// Verify a signature on a message. Returns true if the signature is good. @@ -502,17 +493,17 @@ impl TraitPair for Pair { /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct /// size. Use it only if you're coming from byte buffers and need the speed. fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { + let public_key = match VerificationKey::try_from(pubkey.as_ref()) { Ok(pk) => pk, Err(_) => return false, }; - let sig = match ed25519_dalek::Signature::try_from(sig) { + let sig = match ed25519_zebra::Signature::try_from(sig) { Ok(s) => s, Err(_) => return false, }; - public_key.verify(message.as_ref(), &sig).is_ok() + public_key.verify(&sig, message.as_ref()).is_ok() } /// Return a vec filled with raw data. @@ -524,8 +515,8 @@ impl TraitPair for Pair { #[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. - pub fn seed(&self) -> &Seed { - self.0.secret.as_bytes() + pub fn seed(&self) -> Seed { + self.secret.into() } /// Exactly as `from_string` except that if no matches are found then, the the first 32 @@ -577,12 +568,12 @@ mod test { fn seed_and_derive_should_work() { let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), &seed); + assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") + hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") ); } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 9bf9345e594c3..7942bafcc2a1b 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1895,6 +1895,7 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; + let pair_unused = sr25519::Pair::generate_with_phrase(None).0; crypto::start_batch_verify(); for it in 0..70 { let msg = format!("Schnorrkel {}!", it); @@ -1902,8 +1903,10 @@ mod tests { crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); } - // push invlaid - crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); + // push invalid + let msg = b"asdf!"; + let signature = pair.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1938,10 +1941,10 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { - // invalid ed25519 signature + // valid ed25519 
@@ -1938,10 +1941,10 @@ mod tests {
 		ext.register_extension(TaskExecutorExt::new(TaskExecutor::new()));
 
 		ext.execute_with(|| {
-			// invalid ed25519 signature
+			// valid ed25519 signature
 			crypto::start_batch_verify();
 			crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub());
-			assert!(!crypto::finish_batch_verify());
+			assert!(crypto::finish_batch_verify());
 
 			// 2 valid ed25519 signatures
 			crypto::start_batch_verify();
@@ -1961,12 +1964,14 @@ mod tests {
 
 			// 1 valid, 1 invalid ed25519 signature
 			crypto::start_batch_verify();
 
-			let pair = ed25519::Pair::generate_with_phrase(None).0;
+			let pair1 = ed25519::Pair::generate_with_phrase(None).0;
+			let pair2 = ed25519::Pair::generate_with_phrase(None).0;
 			let msg = b"Important message";
-			let signature = pair.sign(msg);
-			crypto::ed25519_batch_verify(&signature, msg, &pair.public());
+			let signature = pair1.sign(msg);
 			crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub());
+			crypto::ed25519_batch_verify(&signature, msg, &pair1.public());
+			crypto::ed25519_batch_verify(&signature, msg, &pair2.public());
 
 			assert!(!crypto::finish_batch_verify());
@@ -1993,11 +1998,13 @@ mod tests {
 
 			// 1 valid sr25519, 1 invalid sr25519
 			crypto::start_batch_verify();
 
-			let pair = sr25519::Pair::generate_with_phrase(None).0;
+			let pair1 = sr25519::Pair::generate_with_phrase(None).0;
+			let pair2 = sr25519::Pair::generate_with_phrase(None).0;
 			let msg = b"Schnorrkcel!";
-			let signature = pair.sign(msg);
-			crypto::sr25519_batch_verify(&signature, msg, &pair.public());
+			let signature = pair1.sign(msg);
+			crypto::sr25519_batch_verify(&signature, msg, &pair1.public());
+			crypto::sr25519_batch_verify(&signature, msg, &pair2.public());
 			crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub());
 
 			assert!(!crypto::finish_batch_verify());
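A note on the flipped assertion in the tests above: ed25519-zebra validates under ZIP 215-style rules, and the all-zero signature paired with the all-zero public key satisfies its verification equation, so that entry now counts as valid. The reworked tests therefore construct genuinely invalid entries by checking a real signature against the wrong public key. Below is a self-contained sketch of that pattern, written against sp_core's types directly since the batch host functions need an externalities environment; the function name is invented for this note.

    // Illustrative sketch, not part of the patch: the "wrong key" pattern.
    use sp_core::{ed25519, Pair as _};

    fn wrong_key_is_invalid() {
        let pair1 = ed25519::Pair::generate().0;
        let pair2 = ed25519::Pair::generate().0;
        let msg = b"Important message";
        let signature = pair1.sign(msg);

        // Against pair1's key the signature is valid...
        assert!(ed25519::Pair::verify(&signature, msg, &pair1.public()));
        // ...and against pair2's key it must fail; pushed into a batch, this
        // is the entry that makes `finish_batch_verify()` return false.
        assert!(!ed25519::Pair::verify(&signature, msg, &pair2.public()));
    }
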
From 90c8ac3410966ef538fb1444412b60077d073a05 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 15 Aug 2022 07:57:15 +0000
Subject: [PATCH 135/135] Bump rpassword from 5.0.1 to 7.0.0 (#11826)

* Bump rpassword from 5.0.1 to 7.0.0

Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 5.0.1 to 7.0.0.
- [Release notes](https://github.com/conradkleinespel/rpassword/releases)
- [Commits](https://github.com/conradkleinespel/rpassword/compare/v5.0.1...v7.0.0)

---
updated-dependencies:
- dependency-name: rpassword
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]

* Use new API

Signed-off-by: Oliver Tale-Yazdi

* fmt

Signed-off-by: Oliver Tale-Yazdi

Signed-off-by: dependabot[bot]
Signed-off-by: Oliver Tale-Yazdi
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Oliver Tale-Yazdi
---
 Cargo.lock                               | 4 ++--
 client/cli/Cargo.toml                    | 2 +-
 client/cli/src/commands/utils.rs         | 2 +-
 client/cli/src/params/keystore_params.rs | 5 ++---
 4 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index b5506b1259436..da047d4cba6d8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7534,9 +7534,9 @@ dependencies = [
 
 [[package]]
 name = "rpassword"
-version = "5.0.1"
+version = "7.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb"
+checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680"
 dependencies = [
  "libc",
  "winapi",
diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml
index ea60e4c9f87e5..5f4c00d06aea3 100644
--- a/client/cli/Cargo.toml
+++ b/client/cli/Cargo.toml
@@ -24,7 +24,7 @@ names = { version = "0.13.0", default-features = false }
 parity-scale-codec = "3.0.0"
 rand = "0.7.3"
 regex = "1.5.5"
-rpassword = "5.0.0"
+rpassword = "7.0.0"
 serde = "1.0.136"
 serde_json = "1.0.79"
 thiserror = "1.0.30"
diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs
index 32556f0ea728d..95849065471b4 100644
--- a/client/cli/src/commands/utils.rs
+++ b/client/cli/src/commands/utils.rs
@@ -48,7 +48,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result<String> {
 			uri.into()
 		}
 	} else {
-		rpassword::read_password_from_tty(Some("URI: "))?
+		rpassword::prompt_password("URI: ")?
 	};
 
 	Ok(uri)
diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs
index 46403f95fbc4b..386d1791dd805 100644
--- a/client/cli/src/params/keystore_params.rs
+++ b/client/cli/src/params/keystore_params.rs
@@ -94,7 +94,7 @@ impl KeystoreParams {
 		let (password_interactive, password) = (self.password_interactive, self.password.clone());
 
 		let pass = if password_interactive {
-			let password = rpassword::read_password_from_tty(Some("Key password: "))?;
+			let password = rpassword::prompt_password("Key password: ")?;
 			Some(SecretString::new(password))
 		} else {
 			password
@@ -105,6 +105,5 @@ impl KeystoreParams {
 }
 
 fn input_keystore_password() -> Result<String, Error> {
-	rpassword::read_password_from_tty(Some("Keystore password: "))
-		.map_err(|e| format!("{:?}", e).into())
+	rpassword::prompt_password("Keystore password: ").map_err(|e| format!("{:?}", e).into())
 }
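For reference, the rpassword API change this bump tracks: 5.x's `read_password_from_tty(Some(prompt))` is, as of 7.x, `prompt_password(prompt)`, which takes the prompt directly and returns `std::io::Result<String>`, hence the one-line call sites above. A minimal sketch of the new call (the `main` wrapper is invented for this note):

    fn main() -> std::io::Result<()> {
        // rpassword 7: prints the prompt, then reads a line from the TTY
        // with echo disabled.
        let password = rpassword::prompt_password("Keystore password: ")?;
        println!("read {} bytes", password.len());
        Ok(())
    }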